**Schema** (column and dtype, reconstructed from the flattened header):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

The records below render one table row per file. Signal names drop the `qsc_code_` prefix, and Python-specific `qsc_codepython_` signals are written with a `py:` prefix. Signal values not listed in a record are 0; in every record the raw `frac_words_unique` and `frac_lines_string_concat` flag columns are null.
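A note on the layout, inferred from the rows below: each `*_quality_signal` column holds a measured value, while the same-named suffix-free column holds a 0/1 flag (null where the signal itself is null) that appears to mark whether the signal tripped a filter. A minimal loading sketch, assuming the rows are available as a Parquet file (the path `code_quality_rows.parquet` is hypothetical; this dump does not name its source):

```python
import pandas as pd

# Hypothetical file name; the dump does not say where these rows live.
df = pd.read_parquet("code_quality_rows.parquet")

# Measured quality signals carry the `_quality_signal` suffix.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]

# The suffix-free qsc_* twins hold the 0/1 per-signal filter flags.
flag_cols = [c for c in df.columns
             if c.startswith("qsc_") and not c.endswith("_quality_signal")]

print(df[["hexsha", "size", "lang"] + signal_cols].head())
```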
---

**hexsha:** `083cd985e8490fd2529b4838358dcdef0b1b20a5` (size 92, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `DisplayPane/Widgets/LabelEditor/PolygonEditor.py` | `CallumJHays/g26-egb320-2019` | `6dde6b5d2f72fac3928c5042a27dc50e978c3425` | ["MIT"] | null | null → null |
| max_issues | `DisplayPane/Widgets/LabelEditor/PolygonEditor.py` | `CallumJHays/g26-egb320-2019` | `6dde6b5d2f72fac3928c5042a27dc50e978c3425` | ["MIT"] | null | null → null |
| max_forks | `DisplayPane/Widgets/LabelEditor/PolygonEditor.py` | `CallumJHays/g26-egb320-2019` | `6dde6b5d2f72fac3928c5042a27dc50e978c3425` | ["MIT"] | null | null → null |

content (indentation restored; extraction dropped the file's blank lines):

```python
from .LabelEditorABC import LabelEditorABC
class PolygonEditor(LabelEditorABC):
    pass
```

Surface stats: avg_line_length 13.142857, max_line_length 42, alphanum_fraction 0.804348

Quality signals: num_words 8, num_chars 92, mean_word_length 9.25, frac_words_unique 0.75, frac_chars_whitespace 0.152174, size_file_byte 92, num_lines 6, num_chars_line_max 43, num_chars_line_mean 15.333333, frac_chars_alphabet 0.948718, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_pass 0.333333, py:frac_lines_import 0.333333, py:score_lines_no_logic 0.666667

Filter flags set: num_words, num_lines, py:cate_var_zero, py:frac_lines_pass, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
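For the simpler columns, the record above can be checked directly against its `content`: of its 92 bytes, 14 are whitespace (0.152174) and 74 are alphanumeric (0.804348). A sketch of recomputing those surface statistics; the exact line- and word-counting conventions of the original pipeline are assumptions here:

```python
def surface_signals(content: str) -> dict:
    """Recompute a few surface statistics for one file.

    Assumed conventions: lines split on newline characters, words
    split on whitespace; the original pipeline may differ slightly.
    """
    lines = content.split("\n")
    n = len(content)
    words = content.split()
    return {
        "size": n,
        "num_lines": len(lines),
        "num_chars_line_max": max(len(line) for line in lines),
        "num_chars_line_mean": n / len(lines),
        "num_words": len(words),
        "frac_words_unique": len(set(words)) / len(words),
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n,
        "alphanum_fraction": sum(c.isalnum() for c in content) / n,
    }
```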
---

**hexsha:** `f229f311f3f4ccf1acd2dfce868c83a1f8ef46e4` (size 31, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `pipeline/src/__init__.py` | `sawyerwatts/StopSpotDataPipeline` | `6537d0d1779d9ffa6a3096c02f4081d659c12a0e` | ["MIT"] | 3 | 2020-02-19T05:25:56.000Z → 2020-02-22T21:31:34.000Z |
| max_issues | `pipeline/src/__init__.py` | `sawyerwatts/StopSpotDataPipeline` | `6537d0d1779d9ffa6a3096c02f4081d659c12a0e` | ["MIT"] | 69 | 2020-02-20T20:30:03.000Z → 2020-05-29T01:20:05.000Z |
| max_forks | `pipeline/src/__init__.py` | `wolakdav/TeamBeeCapstoneProject` | `6957416273fda85a12e86408ae635d7491fb1035` | ["MIT"] | 4 | 2020-06-05T03:47:49.000Z → 2020-12-21T01:17:02.000Z |

content:

```python
from src.client import _Client
```

Surface stats: avg_line_length 15.5, max_line_length 30, alphanum_fraction 0.83871

Quality signals: num_words 5, num_chars 31, mean_word_length 5, frac_words_unique 0.8, frac_chars_whitespace 0.129032, size_file_byte 31, num_lines 1, num_chars_line_max 31, num_chars_line_mean 31, frac_chars_alphabet 0.925926, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: num_words, num_chars, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `f23978df56cad21b8a64bac428b7aa1b0c27293c` (size 272, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `tests/test_de.py` | `alexmisk/pyserde` | `bfa8629240950657f750464dbb80d8160a8f8070` | ["MIT"] | null | null → null |
| max_issues | `tests/test_de.py` | `alexmisk/pyserde` | `bfa8629240950657f750464dbb80d8160a8f8070` | ["MIT"] | 10 | 2020-11-03T07:30:06.000Z → 2021-09-01T06:47:13.000Z |
| max_forks | `tests/test_de.py` | `alexmisk/pyserde` | `bfa8629240950657f750464dbb80d8160a8f8070` | ["MIT"] | null | null → null |

content (indentation restored):

```python
from typing import Tuple, Union
from serde.de import from_obj
def test_from_obj():
    assert not from_obj(int, None, False, True)
    assert "a" == from_obj(Union[int, str], "a", False, True)
    assert ("a", "b") == from_obj(Tuple[str, str], ("a", "b"), False, True)
```

Surface stats: avg_line_length 27.2, max_line_length 75, alphanum_fraction 0.639706

Quality signals: num_words 45, num_chars 272, mean_word_length 3.733333, frac_words_unique 0.422222, frac_chars_top_2grams 0.208333, frac_chars_top_3grams 0.178571, frac_chars_top_4grams 0.190476, frac_chars_whitespace 0.183824, size_file_byte 272, num_lines 9, num_chars_line_max 76, num_chars_line_mean 30.222222, frac_chars_alphabet 0.756757, frac_chars_string_length 0.022059, frac_lines_assert 0.5, py:cate_ast 1, py:frac_lines_func_ratio 0.166667, py:cate_var_zero true, py:frac_lines_import 0.333333, py:score_lines_no_logic 0.5

Filter flags set: frac_chars_top_2grams, frac_chars_top_4grams, num_lines, frac_lines_assert, py:cate_var_zero, py:frac_lines_import

effective "0", hits 6
---

**hexsha:** `f27c1b296ba314e2b5a56250cea1770949c60d6f` (size 85, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `shop_simplevariations/tests/__init__.py` | `pjdelport/django-shop-simplevariations` | `72ecfe2ebe31ccbd51a745f954a15ce848c48be9` | ["BSD-3-Clause"] | 9 | 2015-03-14T20:55:06.000Z → 2021-06-06T11:50:18.000Z |
| max_issues | `shop_simplevariations/tests/__init__.py` | `shyba/django-shop-simplevariations` | `e62e2cdddf4e4caed89860c191e94bc6fb6a3346` | ["BSD-3-Clause"] | 2 | 2016-08-10T18:54:19.000Z → 2016-10-03T13:46:16.000Z |
| max_forks | `shop_simplevariations/tests/__init__.py` | `shyba/django-shop-simplevariations` | `e62e2cdddf4e4caed89860c191e94bc6fb6a3346` | ["BSD-3-Clause"] | 8 | 2015-01-08T18:00:07.000Z → 2019-04-13T23:22:57.000Z |

content:

```python
from .cart_modifier import *
from .simplevariation_tags import *
from .views import *
```

Surface stats: avg_line_length 28.333333, max_line_length 35, alphanum_fraction 0.8

Quality signals: num_words 11, num_chars 85, mean_word_length 6, frac_words_unique 0.636364, frac_chars_top_2grams 0.30303, frac_chars_whitespace 0.129412, size_file_byte 85, num_lines 3, num_chars_line_max 36, num_chars_line_mean 28.333333, frac_chars_alphabet 0.891892, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_2grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `f2910a5c61c89b31230f4cbac7d9b435ab4085ce` (size 99, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `taskflow/tests/test_task_run.py` | `jiangxianfu/smarttaskflow` | `c661d9776bc98823396d423e33b121933d4c3611` | ["MIT"] | 9 | 2020-02-25T01:23:10.000Z → 2022-01-29T10:14:13.000Z |
| max_issues | `taskflow/tests/test_task_run.py` | `jiangxianfu/smarttaskflow` | `c661d9776bc98823396d423e33b121933d4c3611` | ["MIT"] | null | null → null |
| max_forks | `taskflow/tests/test_task_run.py` | `jiangxianfu/smarttaskflow` | `c661d9776bc98823396d423e33b121933d4c3611` | ["MIT"] | 5 | 2020-02-23T14:32:56.000Z → 2022-01-07T17:48:03.000Z |

content (indentation restored):

```python
# -*- coding: utf-8 -*-
def test_task_run():
    print("test task run module")
    assert 1 == 1
```

Surface stats: avg_line_length 14.142857, max_line_length 33, alphanum_fraction 0.565657

Quality signals: num_words 15, num_chars 99, mean_word_length 3.6, frac_words_unique 0.733333, frac_chars_top_2grams 0.296296, frac_chars_top_3grams 0.407407, frac_chars_digital 0.040541, frac_chars_whitespace 0.252525, size_file_byte 99, num_lines 6, num_chars_line_max 34, num_chars_line_mean 16.5, frac_chars_alphabet 0.689189, frac_chars_comments 0.212121, frac_chars_string_length 0.263158, frac_lines_assert 0.333333, py:cate_ast 1, py:frac_lines_func_ratio 0.333333, py:cate_var_zero true, py:score_lines_no_logic 0.333333, py:frac_lines_print 0.333333

Filter flags set: num_words, frac_chars_top_2grams, frac_chars_top_3grams, num_lines, py:frac_lines_func_ratio, py:cate_var_zero

effective "0", hits 6
---

**hexsha:** `4b2e090c6474fa2bd4eb423189555879fcc84e04` (size 439, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `src/pyconmech/init_template.py` | `yijiangh/conmech` | `9f24230f08587c5e62e3b482f8829f5ea449a169` | ["MIT"] | 10 | 2018-12-10T17:52:15.000Z → 2021-05-12T05:49:34.000Z |
| max_issues | `src/pyconmech/init_template.py` | `yijiangh/conmech` | `9f24230f08587c5e62e3b482f8829f5ea449a169` | ["MIT"] | 32 | 2018-11-28T04:00:24.000Z → 2020-03-14T21:20:38.000Z |
| max_forks | `src/pyconmech/init_template.py` | `yijiangh/conmech` | `9f24230f08587c5e62e3b482f8829f5ea449a169` | ["MIT"] | 1 | 2020-09-23T01:19:00.000Z → 2020-09-23T01:19:00.000Z |

content:

```python
# https://github.com/jpanetta/MeshFEM/blob/master/python/init_template.py
################################################################################
# auto-generated from @PROJECT_SOURCE_DIR@/src/pyconmech/init_template.py
################################################################################
import sys as _sys
_sys.path.insert(0, '@PROJECT_SOURCE_DIR@/src/pyconmech')
from _pystiffness_checker import _StiffnessChecker
```

Surface stats: avg_line_length 54.875, max_line_length 80, alphanum_fraction 0.52164

Quality signals: num_words 40, num_chars 439, mean_word_length 5.45, frac_words_unique 0.7, frac_chars_top_2grams 0.110092, frac_chars_top_3grams 0.12844, frac_chars_top_4grams 0.174312, frac_chars_dupe_5grams 0.256881, frac_chars_digital 0.002381, frac_chars_whitespace 0.04328, size_file_byte 439, num_lines 8, num_chars_line_max 81, num_chars_line_mean 54.875, frac_chars_alphabet 0.516667, frac_chars_comments 0.328018, cate_autogen 1, frac_chars_string_length 0.255639, frac_chars_long_word_length 0.255639, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 0.666667, py:score_lines_no_logic 0.666667

Filter flags set: frac_chars_top_4grams, num_lines, cate_autogen, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
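This file is the first in the dump with `cate_autogen` set, and its header comment does say it is auto-generated. A plausible detector for that category just scans the leading comments for generation markers; the marker list below is an assumption, not the pipeline's actual one:

```python
import re

# Assumed marker list; the real qsc_code_cate_autogen detector is not
# documented in this dump.
_AUTOGEN_MARKERS = re.compile(
    r"auto-?generated|automatically generated|generated by|do not edit",
    re.IGNORECASE,
)

def looks_autogenerated(content: str, head_lines: int = 10) -> bool:
    """True if a generation marker appears in the first few lines."""
    head = "\n".join(content.splitlines()[:head_lines])
    return _AUTOGEN_MARKERS.search(head) is not None
```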
---

**hexsha:** `4b32ed49c9ab2b0ce0485bee3ada01060d5fb711` (size 283, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `xdnlp/bert/layers/__init__.py` | `mikuh/xdnlp` | `1da294659e276c59c620a9ebbab875f1d6fbb038` | ["MIT"] | 1 | 2022-02-08T03:27:32.000Z → 2022-02-08T03:27:32.000Z |
| max_issues | `xdnlp/bert/layers/__init__.py` | `mikuh/xdnlp` | `1da294659e276c59c620a9ebbab875f1d6fbb038` | ["MIT"] | null | null → null |
| max_forks | `xdnlp/bert/layers/__init__.py` | `mikuh/xdnlp` | `1da294659e276c59c620a9ebbab875f1d6fbb038` | ["MIT"] | null | null → null |

content:

```python
from xdnlp.bert.layers.position_embedding import PositionEmbedding
from xdnlp.bert.layers.self_attention_mask import SelfAttentionMask
from xdnlp.bert.layers.on_device_embedding import OnDeviceEmbedding
from xdnlp.bert.layers.transformer_encoder_block import TransformerEncoderBlock
```

Surface stats: avg_line_length 56.6, max_line_length 79, alphanum_fraction 0.90106

Quality signals: num_words 35, num_chars 283, mean_word_length 7.085714, frac_words_unique 0.542857, frac_chars_top_2grams 0.145161, frac_chars_top_3grams 0.209677, frac_chars_top_4grams 0.306452, frac_chars_whitespace 0.056537, size_file_byte 283, num_lines 4, num_chars_line_max 80, num_chars_line_mean 70.75, frac_chars_alphabet 0.928839, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_3grams, frac_chars_top_4grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `4b3a070740133baf9138a1d01a459a8ff6643dd3` (size 4,061, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/NonterminalAddRemoveMixedTest.py` | `PatrikValkovic/grammpy` | `8308a1fd349bf9ea0d267360cc9a4ab20d1629e8` | ["MIT"] | 1 | 2021-02-04T12:41:08.000Z → 2021-02-04T12:41:08.000Z |
| max_issues | `tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/NonterminalAddRemoveMixedTest.py` | `PatrikValkovic/grammpy` | `8308a1fd349bf9ea0d267360cc9a4ab20d1629e8` | ["MIT"] | 3 | 2017-07-08T16:28:52.000Z → 2020-04-23T18:06:24.000Z |
| max_forks | `tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/NonterminalAddRemoveMixedTest.py` | `PatrikValkovic/grammpy` | `8308a1fd349bf9ea0d267360cc9a4ab20d1629e8` | ["MIT"] | 1 | 2021-02-04T12:41:10.000Z → 2021-02-04T12:41:10.000Z |

content (indentation restored; extraction dropped the file's blank lines):

```python
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 03.08.2017 12:28
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.old_api import Grammar
from grammpy.old_api import Nonterminal
class TempClass(Nonterminal):
    pass
class Second(Nonterminal):
    pass
class Third(Nonterminal):
    pass
class NonterminalAddRemoveMixedTest(TestCase):
    def test_add_remove_add_one(self):
        gr = Grammar()
        self.assertEqual(gr.nonterms_count(), 0)
        self.assertFalse(gr.have_nonterm(TempClass))
        self.assertIsNone(gr.get_nonterm(TempClass))
        self.assertIsNone(gr.nonterm(TempClass))
        self.assertEqual(gr.add_nonterm(TempClass), [TempClass])
        self.assertEqual(gr.nonterms_count(), 1)
        self.assertIsNotNone(gr.get_nonterm(TempClass))
        self.assertIsNotNone(gr.nonterm(TempClass))
        self.assertEqual(gr.nonterm(TempClass), TempClass)
        self.assertEqual(gr.remove_nonterm(TempClass), [TempClass])
        self.assertEqual(gr.nonterms_count(), 0)
        self.assertFalse(gr.have_nonterm(TempClass))
        self.assertIsNone(gr.get_nonterm(TempClass))
        self.assertIsNone(gr.nonterm(TempClass))
    def test_addTwoRemoveOneAndAddThird(self):
        gr = Grammar()
        self.assertEqual(gr.add_nonterm(TempClass), [TempClass])
        self.assertEqual(gr.add_nonterm(Second), [Second])
        self.assertEqual(gr.nonterms_count(), 2)
        self.assertIsNotNone(gr.get_nonterm(TempClass))
        self.assertIsNotNone(gr.nonterm(TempClass))
        self.assertEqual(gr.get_nonterm(TempClass), TempClass)
        self.assertIsNotNone(gr.get_nonterm(Second))
        self.assertIsNotNone(gr.nonterm(Second))
        self.assertEqual(gr.get_nonterm(Second), Second)
        self.assertEqual(gr.remove_nonterm(Second), [Second])
        self.assertEqual(gr.nonterms_count(), 1)
        self.assertIsNotNone(gr.get_nonterm(TempClass))
        self.assertIsNotNone(gr.nonterm(TempClass))
        self.assertEqual(gr.nonterm(TempClass), TempClass)
        self.assertIsNone(gr.get_nonterm(Second))
        self.assertEqual(gr.add_nonterm(Third), [Third])
        self.assertEqual(gr.nonterms_count(), 2)
        self.assertIsNotNone(gr.get_nonterm(TempClass))
        self.assertIsNotNone(gr.nonterm(TempClass))
        self.assertEqual(gr.get_nonterm(TempClass), TempClass)
        self.assertFalse(gr.have_nonterm(Second))
        self.assertIsNone(gr.nonterm(Second))
        self.assertIsNotNone(gr.get_nonterm(Third))
        self.assertIsNotNone(gr.nonterm(Third))
        self.assertEqual(gr.get_nonterm(Third), Third)
    def test_addThreeRemoveTwoInArray(self):
        gr = Grammar()
        self.assertEqual(gr.add_nonterm([TempClass, Second, Third]), [TempClass, Second, Third])
        self.assertEqual(gr.nonterms_count(), 3)
        self.assertIsNotNone(gr.get_nonterm(TempClass))
        self.assertIsNotNone(gr.nonterm(TempClass))
        self.assertEqual(gr.nonterm(TempClass), TempClass)
        self.assertIsNotNone(gr.get_nonterm(Second))
        self.assertIsNotNone(gr.nonterm(Second))
        self.assertEqual(gr.nonterm(Second), Second)
        self.assertIsNotNone(gr.get_nonterm(Third))
        self.assertIsNotNone(gr.nonterm(Third))
        self.assertEqual(gr.nonterm(Third), Third)
        self.assertEqual(gr.remove_nonterm([Third, TempClass]), [Third, TempClass])
        self.assertEqual(gr.nonterms_count(), 1)
        self.assertTrue(gr.have_nonterm(Second))
        self.assertFalse(gr.have_nonterm(TempClass))
        self.assertFalse(gr.have_nonterm(Third))
        self.assertEqual(gr.add_nonterm(Third), [Third])
        self.assertEqual(gr.nonterms_count(), 2)
        self.assertIsNotNone(gr.nonterm(Second))
        self.assertEqual(gr.nonterm(Second), Second)
        self.assertIsNotNone(gr.get_nonterm(Second))
        self.assertIsNotNone(gr.nonterm(Third))
        self.assertEqual(gr.nonterm(Third), Third)
        self.assertIsNotNone(gr.get_nonterm(Third))
if __name__ == '__main__':
    main()
```

Surface stats: avg_line_length 39.427184, max_line_length 96, alphanum_fraction 0.699828

Quality signals: num_words 458, num_chars 4,061, mean_word_length 6.078603, frac_words_unique 0.126638, frac_chars_top_2grams 0.15625, frac_chars_top_3grams 0.177083, frac_chars_top_4grams 0.10273, frac_chars_dupe_5grams 0.846624, frac_chars_dupe_6grams 0.799569, frac_chars_dupe_7grams 0.721624, frac_chars_dupe_8grams 0.706897, frac_chars_dupe_9grams 0.668463, frac_chars_dupe_10grams 0.646193, frac_chars_digital 0.006299, frac_chars_whitespace 0.17902, size_file_byte 4,061, num_lines 102, num_chars_line_max 97, num_chars_line_mean 39.813725, frac_chars_alphabet 0.828734, frac_chars_comments 0.024378, frac_lines_dupe_lines 0.691358, frac_chars_string_length 0.002024, frac_lines_assert 0.777778, py:cate_ast 1, py:frac_lines_func_ratio 0.037037, py:cate_var_zero false, py:frac_lines_pass 0.037037, py:frac_lines_import 0.037037, py:score_lines_no_logic 0.123457

Filter flags set: frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_10grams, frac_lines_assert

effective "0", hits 6
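The record above is the first where the `frac_chars_dupe_{5..10}grams` signals dominate: its test methods repeat near-identical assertion lines, so long word n-grams recur throughout the file. A simplified sketch of such a duplication measure; the real signal appears to weight duplicates by the characters they cover, which this version does not attempt:

```python
from collections import Counter

def frac_dupe_ngrams(content: str, n: int = 5) -> float:
    """Share of word n-grams that occur more than once.

    Simplified stand-in for qsc_code_frac_chars_dupe_{n}grams; the
    original's character weighting is an assumption left out here.
    """
    words = content.split()
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    if not ngrams:
        return 0.0
    counts = Counter(ngrams)
    duplicated = sum(c for c in counts.values() if c > 1)
    return duplicated / len(ngrams)
```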
---

**hexsha:** `4b63cc4511ffc6f06c746a09ef3301a5c735fe58` (size 191, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `server/common_models/__init__.py` | `Soopro/totoro` | `6be1af50496340ded9879a6450c8208ac9f97e72` | ["MIT"] | null | null → null |
| max_issues | `server/common_models/__init__.py` | `Soopro/totoro` | `6be1af50496340ded9879a6450c8208ac9f97e72` | ["MIT"] | null | null → null |
| max_forks | `server/common_models/__init__.py` | `Soopro/totoro` | `6be1af50496340ded9879a6450c8208ac9f97e72` | ["MIT"] | 1 | 2019-10-31T06:11:41.000Z → 2019-10-31T06:11:41.000Z |

content:

```python
# coding=utf-8
from __future__ import absolute_import
from .user import *
from .media import *
from .book import *
from .category import *
from .configuration import *
from .notify import *
```

Surface stats: avg_line_length 19.1, max_line_length 38, alphanum_fraction 0.759162

Quality signals: num_words 26, num_chars 191, mean_word_length 5.384615, frac_words_unique 0.5, frac_chars_top_2grams 0.428571, frac_chars_digital 0.00625, frac_chars_whitespace 0.162304, size_file_byte 191, num_lines 9, num_chars_line_max 39, num_chars_line_mean 21.222222, frac_chars_alphabet 0.86875, frac_chars_comments 0.062827, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_2grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `299faf5bc659ee7d835f5448efd5eb8fa98854df` (size 3,223, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `data/load_mnist.py` | `gavinlive/perception` | `7b44d896a3f2ed3afb0376c394a5de3f8a4e4304` | ["MIT"] | 3 | 2019-03-18T16:16:20.000Z → 2020-10-18T14:25:18.000Z |
| max_issues | `data/load_mnist.py` | `gavinlive/perception` | `7b44d896a3f2ed3afb0376c394a5de3f8a4e4304` | ["MIT"] | 13 | 2019-12-16T21:18:15.000Z → 2021-07-27T18:55:01.000Z |
| max_forks | `data/load_mnist.py` | `gavinlive/perception` | `7b44d896a3f2ed3afb0376c394a5de3f8a4e4304` | ["MIT"] | null | null → null |

content (indentation restored):

```python
import pickle as cpk
import numpy as np
import random
import os
import tensorflow as tf
def load_data():
    dir_path = os.path.dirname(os.path.realpath(__file__))
    filename= dir_path + '/rmnist_expanded_10.pkl'
    print("Loading data")
    with open(filename, 'rb') as fname:
        print("Opened data successfully")
        mnist = cpk.load(fname, encoding='latin1') # latin1 due to incompatibility between pickle in python2 and python3
    mnist_data = np.array(mnist[0][0]) # (900, 784)
    mnist_labels = np.array(mnist[0][1]) # (900,)
    #mnist_labels_mat = np.zeros(list(np.shape(mnist_labels))+[10], dtype=np.int8)
    def insert_one(this_array, indx):
        this_array[indx] = 1
        return this_array
    print("Creating labels")
    mnist_labels = np.array([insert_one(np.zeros([10]),x) for x in mnist_labels])
    mnist_labels = mnist_labels.astype(np.float64)
    print("Finished creating labels")
    del mnist
    #mnist_test_data = mnist[1][0] # (10000, 784)
    #mnist_test_labels = mnist[1][1] # (10000,)
    data_idx = random.sample(range(900), 250)
    train_data_idx = data_idx[0:200]
    test_data_idx = data_idx[200:250]
    mnist_train_data = mnist_data[train_data_idx, :]
    mnist_train_labels = mnist_labels[train_data_idx]
    mnist_test_data = mnist_data[test_data_idx, :]
    mnist_test_labels = mnist_data[test_data_idx]
    mnist_train_data = np.reshape(mnist_train_data, [200, 28, 28])
    mnist_test_data = np.reshape(mnist_test_data,[50, 28, 28])
    print("Finished loading reduced-size MNIST dataset (200 training, 50 test)")
    return mnist_train_data, mnist_train_labels, mnist_test_data, mnist_test_labels
def load_data_light(tr=1,te=2):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    filename= dir_path + '/rmnist_expanded_10.pkl'
    print("Loading data")
    with open(filename, 'rb') as fname:
        print("Opened data successfully")
        mnist = cpk.load(fname, encoding='latin1') # latin1 due to incompatibility between pickle in python2 and python3
    mnist_data = np.array(mnist[0][0]) # (900, 784)
    mnist_labels = np.array(mnist[0][1]) # (900,)
    def insert_one(this_array, indx):
        this_array[indx] = 1
        return this_array
    print("Creating labels")
    mnist_labels = np.array([insert_one(np.zeros([10]),x) for x in mnist_labels])
    mnist_labels = mnist_labels.astype(np.float32)
    print("Finished creating labels")
    del mnist
    #mnist_test_data = mnist[1][0] # (10000, 784)
    #mnist_test_labels = mnist[1][1] # (10000,)
    data_idx = random.sample(range(900), tr+te)
    train_data_idx = data_idx[0:tr]
    test_data_idx = data_idx[tr:tr+te]
    mnist_train_data = mnist_data[train_data_idx, :]
    mnist_train_labels = mnist_labels[train_data_idx]
    mnist_test_data = mnist_data[test_data_idx, :]
    mnist_test_labels = mnist_data[test_data_idx]
    mnist_train_data = np.reshape(mnist_train_data, [tr, 28, 28])
    mnist_test_data = np.reshape(mnist_test_data,[te, 28, 28])
    print("Finished loading reduced-size MNIST dataset (%d training, %d test)" % (tr, te))
    print(mnist_train_data.dtype)
    return mnist_train_data, mnist_train_labels, mnist_test_data, mnist_test_labels
```

Surface stats: avg_line_length 37.476744, max_line_length 120, alphanum_fraction 0.70152

Quality signals: num_words 490, num_chars 3,223, mean_word_length 4.328571, frac_words_unique 0.177551, frac_chars_top_2grams 0.059406, frac_chars_top_3grams 0.061292, frac_chars_top_4grams 0.050919, frac_chars_dupe_5grams 0.874116, frac_chars_dupe_6grams 0.857143, frac_chars_dupe_7grams 0.838284, frac_chars_dupe_8grams 0.838284, frac_chars_dupe_9grams 0.838284, frac_chars_dupe_10grams 0.793965, frac_chars_digital 0.050208, frac_chars_whitespace 0.178095, size_file_byte 3,223, num_lines 85, num_chars_line_max 121, num_chars_line_mean 37.917647, frac_chars_alphabet 0.750472, frac_chars_comments 0.129072, frac_lines_dupe_lines 0.645161, frac_chars_string_length 0.123612, frac_chars_long_word_length 0.016482, py:cate_ast 1, py:frac_lines_func_ratio 0.064516, py:cate_var_zero false, py:frac_lines_import 0.080645, py:score_lines_no_logic 0.209677, py:frac_lines_print 0.177419

Filter flags set: frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_9grams, frac_chars_dupe_10grams

effective "0", hits 6
---

**hexsha:** `29a752dd2d61d85f75da7fa19325af7df767e39e` (size 194, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `sos4hjb/polynomials/__init__.py` | `TobiaMarcucci/sos4hjb` | `d8bd5c0179891ff09f11be48777bef148d952a2d` | ["MIT"] | 3 | 2020-07-05T17:36:06.000Z → 2021-11-20T10:41:58.000Z |
| max_issues | `sos4hjb/polynomials/__init__.py` | `TobiaMarcucci/sos4hjb` | `d8bd5c0179891ff09f11be48777bef148d952a2d` | ["MIT"] | null | null → null |
| max_forks | `sos4hjb/polynomials/__init__.py` | `TobiaMarcucci/sos4hjb` | `d8bd5c0179891ff09f11be48777bef148d952a2d` | ["MIT"] | 1 | 2022-01-25T06:39:56.000Z → 2022-01-25T06:39:56.000Z |

content:

```python
from .variable import Variable
from .basis_vector import BasisVector
from .monomial_vector import MonomialVector
from .chebyshev_vector import ChebyshevVector
from .polynomial import Polynomial
```

Surface stats: avg_line_length 32.333333, max_line_length 45, alphanum_fraction 0.871134

Quality signals: num_words 23, num_chars 194, mean_word_length 7.217391, frac_words_unique 0.478261, frac_chars_top_2grams 0.216867, frac_chars_whitespace 0.103093, size_file_byte 194, num_lines 5, num_chars_line_max 46, num_chars_line_mean 38.8, frac_chars_alphabet 0.954023, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_2grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `29ea297041aaa45cc6f58d7b98f3feae2e31a411` (size 158, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `graphish/__init__.py` | `cmoore94/graphish` | `1908743374f48aed1dd17997765948a8c7befcd8` | ["MIT"] | 29 | 2019-06-19T17:13:16.000Z → 2021-07-28T21:48:07.000Z |
| max_issues | `graphish/__init__.py` | `cmoore94/graphish` | `1908743374f48aed1dd17997765948a8c7befcd8` | ["MIT"] | 6 | 2019-08-02T19:56:06.000Z → 2022-02-15T19:31:25.000Z |
| max_forks | `graphish/__init__.py` | `cmoore94/graphish` | `1908743374f48aed1dd17997765948a8c7befcd8` | ["MIT"] | 9 | 2019-07-29T10:59:08.000Z → 2022-01-25T17:32:46.000Z |

content:

```python
from graphish.connector import GraphConnector
from graphish.search import Search
from graphish.delete import Delete
from graphish.mailfolder import MailFolder
```

Surface stats: avg_line_length 39.5, max_line_length 45, alphanum_fraction 0.879747

Quality signals: num_words 20, num_chars 158, mean_word_length 6.95, frac_words_unique 0.4, frac_chars_top_2grams 0.345324, frac_chars_whitespace 0.094937, size_file_byte 158, num_lines 4, num_chars_line_max 46, num_chars_line_mean 39.5, frac_chars_alphabet 0.972028, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_2grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `4b0a890880affa7ee7e611618c3651ae006102a8` (size 26, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `recOrder/__init__.py` | `mehta-lab/recOrder` | `67f2edb9ab13114dfe41d57e465ae24f961b0004` | ["Unlicense"] | 2 | 2022-01-19T21:13:32.000Z → 2022-02-24T19:40:24.000Z |
| max_issues | `recOrder/__init__.py` | `mehta-lab/recOrder` | `67f2edb9ab13114dfe41d57e465ae24f961b0004` | ["Unlicense"] | 55 | 2021-06-24T18:53:18.000Z → 2022-03-30T21:05:14.000Z |
| max_forks | `recOrder/__init__.py` | `mehta-lab/recOrder` | `67f2edb9ab13114dfe41d57e465ae24f961b0004` | ["Unlicense"] | null | null → null |

content:

```python
#todo: format overall init
```

Surface stats: avg_line_length 26, max_line_length 26, alphanum_fraction 0.807692

Quality signals: num_words 4, num_chars 26, mean_word_length 5.25, frac_words_unique 1, frac_chars_whitespace 0.115385, size_file_byte 26, num_lines 1, num_chars_line_max 26, num_chars_line_mean 26, frac_chars_alphabet 0.913043, frac_chars_comments 0.961538, frac_lines_prompt_comments 1, py:cate_ast 1, py:cate_var_zero true; null: frac_lines_dupe_lines, frac_lines_long_string, frac_lines_string_concat, frac_lines_assert, py:frac_lines_func_ratio, py:frac_lines_simplefunc, py:score_lines_no_logic, py:frac_lines_print

Filter flags set: num_words, num_chars, num_lines, frac_chars_comments, frac_lines_prompt_comments, py:cate_var_zero

effective "0", hits 6
---

**hexsha:** `4b2952d03d2ece23c458e0a74b716fd28110aed5` (size 48, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `dear_petition/petition/export/__init__.py` | `robert-w-gries/dear-petition` | `35244afc8e967b41ae5265ae31fd13b26e4e835a` | ["MIT"] | 4 | 2020-04-01T14:42:45.000Z → 2021-12-12T21:11:11.000Z |
| max_issues | `dear_petition/petition/export/__init__.py` | `robert-w-gries/dear-petition` | `35244afc8e967b41ae5265ae31fd13b26e4e835a` | ["MIT"] | 142 | 2019-08-12T19:08:34.000Z → 2022-03-29T23:05:35.000Z |
| max_forks | `dear_petition/petition/export/__init__.py` | `robert-w-gries/dear-petition` | `35244afc8e967b41ae5265ae31fd13b26e4e835a` | ["MIT"] | 8 | 2020-02-04T20:37:00.000Z → 2021-03-28T13:28:32.000Z |

content:

```python
from .main import generate_petition_pdf # noqa
```

Surface stats: avg_line_length 24, max_line_length 47, alphanum_fraction 0.8125

Quality signals: num_words 7, num_chars 48, mean_word_length 5.285714, frac_words_unique 1, frac_chars_whitespace 0.145833, size_file_byte 48, num_lines 1, num_chars_line_max 48, num_chars_line_mean 48, frac_chars_alphabet 0.902439, frac_chars_comments 0.083333, cate_autogen 1, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: num_words, num_chars, num_lines, cate_autogen, py:cate_var_zero, py:frac_lines_import

effective "0", hits 6
---

**hexsha:** `d99d33cefa4ad654506ccf81c4ad9d1ff622c3c9` (size 89, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `scripts/device/temprature_controller/__init__.py` | `heptaliane/my_measurements_scripts` | `0c977a1677d7881a33863ab376cab48a387a7d52` | ["MIT"] | null | null → null |
| max_issues | `scripts/device/temprature_controller/__init__.py` | `heptaliane/my_measurements_scripts` | `0c977a1677d7881a33863ab376cab48a387a7d52` | ["MIT"] | null | null → null |
| max_forks | `scripts/device/temprature_controller/__init__.py` | `heptaliane/my_measurements_scripts` | `0c977a1677d7881a33863ab376cab48a387a7d52` | ["MIT"] | null | null → null |

content:

```python
from .interface import TempratureController
from .cryocon_model62 import Cryocon_Model62
```

Surface stats: avg_line_length 29.666667, max_line_length 44, alphanum_fraction 0.88764

Quality signals: num_words 10, num_chars 89, mean_word_length 7.7, frac_words_unique 0.6, frac_chars_top_2grams 0.363636, frac_chars_digital 0.049383, frac_chars_whitespace 0.089888, size_file_byte 89, num_lines 2, num_chars_line_max 45, num_chars_line_mean 44.5, frac_chars_alphabet 0.901235, py:cate_ast 1, py:cate_var_zero true, py:frac_lines_import 1, py:score_lines_no_logic 1

Filter flags set: frac_chars_top_2grams, num_lines, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `d9a19a13f7b275d11cbe882398e7a8d22f45a2c6` (size 880, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `Lib/importlib/machinery.py` | `oleksandr-pavlyk/cpython` | `eb002dbe0da9622245a355db5f0cd5aa2fc70b40` | ["0BSD"] | 52,316 | 2015-01-01T15:56:25.000Z → 2022-03-31T23:19:01.000Z |
| max_issues | `Lib/importlib/machinery.py` | `dalakatt/cpython` | `2f49b97cc5426087b46515254b9a97a22ee8c807` | ["0BSD"] | 25,286 | 2015-03-03T23:18:02.000Z → 2022-03-31T23:17:27.000Z |
| max_forks | `Lib/importlib/machinery.py` | `dalakatt/cpython` | `2f49b97cc5426087b46515254b9a97a22ee8c807` | ["0BSD"] | 31,623 | 2015-01-01T13:29:37.000Z → 2022-03-31T19:55:06.000Z |

content (continuation-line and body indentation restored):

```python
"""The machinery of importlib: finders, loaders, hooks, etc."""
from ._bootstrap import ModuleSpec
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap_external import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
                                  OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES,
                                  EXTENSION_SUFFIXES)
from ._bootstrap_external import WindowsRegistryFinder
from ._bootstrap_external import PathFinder
from ._bootstrap_external import FileFinder
from ._bootstrap_external import SourceFileLoader
from ._bootstrap_external import SourcelessFileLoader
from ._bootstrap_external import ExtensionFileLoader
from ._bootstrap_external import NamespaceLoader
def all_suffixes():
    """Returns a list of all recognized module suffixes for this process"""
    return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES
```

Surface stats: avg_line_length 41.904762, max_line_length 75, alphanum_fraction 0.810227

Quality signals: num_words 92, num_chars 880, mean_word_length 7.423913, frac_words_unique 0.423913, frac_chars_top_2grams 0.20937, frac_chars_top_3grams 0.245974, frac_chars_top_4grams 0.316252, frac_chars_dupe_5grams 0.120059, frac_chars_whitespace 0.145455, size_file_byte 880, num_lines 20, num_chars_line_max 76, num_chars_line_mean 44, frac_chars_alphabet 0.908245, frac_chars_comments 0.139773, py:cate_ast 1, py:frac_lines_func_ratio 0.066667, py:cate_var_zero true, py:frac_lines_import 0.733333, py:score_lines_no_logic 0.866667

Filter flags set: frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, py:cate_var_zero, py:frac_lines_import, py:score_lines_no_logic

effective "0", hits 6
---

**hexsha:** `d9ec0299d5270d1f23f1810d918cb8a65b64ab4d` (size 18,601, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `shap/benchmark/measures.py` | `santanaangel/shap` | `1c1c4a45440f3475b8544251f9d9e5b43977cd0e` | ["MIT"] | 16,097 | 2016-12-01T20:01:26.000Z → 2022-03-31T20:27:40.000Z |
| max_issues | `shap/benchmark/measures.py` | `santanaangel/shap` | `1c1c4a45440f3475b8544251f9d9e5b43977cd0e` | ["MIT"] | 2,217 | 2017-09-18T20:06:45.000Z → 2022-03-31T21:00:25.000Z |
| max_forks | `shap/benchmark/measures.py` | `santanaangel/shap` | `1c1c4a45440f3475b8544251f9d9e5b43977cd0e` | ["MIT"] | 2,634 | 2017-06-29T21:30:46.000Z → 2022-03-30T07:30:36.000Z |

content (indentation restored; extraction dropped the file's blank lines):

```python
import numpy as np
from tqdm.autonotebook import tqdm
import gc
import warnings
import sklearn.utils
_remove_cache = {}
def remove_retrain(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is retrained for each test sample with the important features set to a constant.
    If you want to know how important a set of features is you can ask how the model would be
    different if those features had never existed. To determine this we can mask those features
    across the entire training and test datasets, then retrain the model. If we apply compare the
    output of this retrained model to the original model we can see the effect produced by knowning
    the features we masked. Since for individualized explanation methods each test sample has a
    different set of most important features we need to retrain the model for every test sample
    to get the change in model performance when a specified fraction of the most important features
    are withheld.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    # see if we match the last cached call
    global _remove_cache
    args = (X_train, y_train, X_test, y_test, model_generator, metric)
    cache_match = False
    if "args" in _remove_cache:
        if all(a is b for a,b in zip(_remove_cache["args"], args)) and np.all(_remove_cache["attr_test"] == attr_test):
            cache_match = True
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # this is the model we will retrain many times
    model_masked = model_generator()
    # mask nmask top features and re-train the model for each test explanation
    X_train_tmp = np.zeros(X_train.shape)
    X_test_tmp = np.zeros(X_test.shape)
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    last_nmask = _remove_cache.get("nmask", None)
    last_yp_masked_test = _remove_cache.get("yp_masked_test", None)
    for i in tqdm(range(len(y_test)), "Retraining for the 'remove' metric"):
        if cache_match and last_nmask[i] == nmask[i]:
            yp_masked_test[i] = last_yp_masked_test[i]
        elif nmask[i] == 0:
            yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
        else:
            # mask out the most important features for this test instance
            X_train_tmp[:] = X_train
            X_test_tmp[:] = X_test
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_train_tmp[:,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
            X_test_tmp[i,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
            # retrain the model and make a prediction
            model_masked.fit(X_train_tmp, y_train)
            yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
    # save our results so the next call to us can be faster when there is redundancy
    _remove_cache["nmask"] = nmask
    _remove_cache["yp_masked_test"] = yp_masked_test
    _remove_cache["attr_test"] = attr_test
    _remove_cache["args"] = args
    return metric(y_test, yp_masked_test)
def remove_mask(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ Each test sample is masked by setting the important features to a constant.
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # mask nmask top features for each test explanation
    X_test_tmp = X_test.copy()
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i,ordering[:nmask[i]]] = mean_vals[ordering[:nmask[i]]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def remove_impute(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the important features set to an imputed value.
    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    C = np.cov(X_train.T)
    C += np.eye(C.shape[0]) * 1e-6
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[nmask[i]:]
            impute_inds = ordering[:nmask[i]]
            # impute missing data assuming it follows a multivariate normal distribution
            Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
            Cio = C[impute_inds,:][:,observe_inds]
            impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
            X_test_tmp[i, impute_inds] = impute
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def remove_resample(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the important features set to resample background values.
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # how many samples to take
    nsamples = 100
    # keep nkeep top features for each test explanation
    N,M = X_test.shape
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    tie_breaking_noise = const_rand(M) * 1e-6
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
    for i in range(N):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[:nmask[i]]] = X_train[inds, :][:, ordering[:nmask[i]]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """ An approximation of holdout that only retraines the model once.
    This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
    efficient that the holdout method because it masks the most important features in every sample
    and then retrains the model once, instead of retraining the model for every test sample like
    the holdout metric.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # mask nmask top features for each explanation
    X_train_tmp = X_train.copy()
    X_train_mean = X_train.mean(0)
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    for i in range(len(y_train)):
        if nmask_train[i] > 0:
            ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
            X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
    X_test_tmp = X_test.copy()
    for i in range(len(y_test)):
        if nmask_test[i] > 0:
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
    # train the model with all the given features masked
    model_masked = model_generator()
    model_masked.fit(X_train_tmp, y_train)
    yp_test_masked = model_masked.predict(X_test_tmp)
    return metric(y_test, yp_test_masked)
_keep_cache = {}
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is retrained for each test sample with the non-important features set to a constant.
    If you want to know how important a set of features is you can ask how the model would be
    different if only those features had existed. To determine this we can mask the other features
    across the entire training and test datasets, then retrain the model. If we apply compare the
    output of this retrained model to the original model we can see the effect produced by only
    knowning the important features. Since for individualized explanation methods each test sample
    has a different set of most important features we need to retrain the model for every test sample
    to get the change in model performance when a specified fraction of the most important features
    are retained.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    # see if we match the last cached call
    global _keep_cache
    args = (X_train, y_train, X_test, y_test, model_generator, metric)
    cache_match = False
    if "args" in _keep_cache:
        if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
            cache_match = True
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # this is the model we will retrain many times
    model_masked = model_generator()
    # keep nkeep top features and re-train the model for each test explanation
    X_train_tmp = np.zeros(X_train.shape)
    X_test_tmp = np.zeros(X_test.shape)
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    last_nkeep = _keep_cache.get("nkeep", None)
    last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
    for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
        if cache_match and last_nkeep[i] == nkeep[i]:
            yp_masked_test[i] = last_yp_masked_test[i]
        elif nkeep[i] == attr_test.shape[1]:
            yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
        else:
            # mask out the most important features for this test instance
            X_train_tmp[:] = X_train
            X_test_tmp[:] = X_test
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
            X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
            # retrain the model and make a prediction
            model_masked.fit(X_train_tmp, y_train)
            yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
    # save our results so the next call to us can be faster when there is redundancy
    _keep_cache["nkeep"] = nkeep
    _keep_cache["yp_masked_test"] = yp_masked_test
    _keep_cache["attr_test"] = attr_test
    _keep_cache["args"] = args
    return metric(y_test, yp_masked_test)
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the non-important features set to their mean.
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the non-important features set to an imputed value.
    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    C = np.cov(X_train.T)
    C += np.eye(C.shape[0]) * 1e-6
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[:nkeep[i]]
            impute_inds = ordering[nkeep[i]:]
            # impute missing data assuming it follows a multivariate normal distribution
            Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
            Cio = C[impute_inds,:][:,observe_inds]
            impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
            X_test_tmp[i, impute_inds] = impute
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the non-important features set to resample background values.
    """ # why broken? overwriting?
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # how many samples to take
    nsamples = 100
    # keep nkeep top features for each test explanation
    N,M = X_test.shape
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    tie_breaking_noise = const_rand(M) * 1e-6
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
    for i in range(N):
        if nkeep[i] < M:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def batch_keep_retrain(nkeep_train, nkeep_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """ An approximation of keep that only retraines the model once.
    This is alse called KAR (Keep And Retrain) in work by Google. It is much more computationally
    efficient that the keep method because it masks the unimportant features in every sample
    and then retrains the model once, instead of retraining the model for every test sample like
    the keep metric.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # mask nkeep top features for each explanation
    X_train_tmp = X_train.copy()
    X_train_mean = X_train.mean(0)
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    for i in range(len(y_train)):
        if nkeep_train[i] < X_train.shape[1]:
            ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
            X_train_tmp[i, ordering[nkeep_train[i]:]] = X_train_mean[ordering[nkeep_train[i]:]]
    X_test_tmp = X_test.copy()
    for i in range(len(y_test)):
        if nkeep_test[i] < X_test.shape[1]:
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i, ordering[nkeep_test[i]:]] = X_train_mean[ordering[nkeep_test[i]:]]
    # train the model with all the features not given masked
    model_masked = model_generator()
    model_masked.fit(X_train_tmp, y_train)
    yp_test_masked = model_masked.predict(X_test_tmp)
    return metric(y_test, yp_test_masked)
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
    """ The how well do the features plus a constant base rate sum up to the model output.
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features and re-train the model for each test explanation
    yp_test = trained_model.predict(X_test)
    return metric(yp_test, strip_list(attr_test).sum(1))
def to_array(*args):
    return [a.values if str(type(a)).endswith("'pandas.core.frame.DataFrame'>") else a for a in args]
def const_rand(size, seed=23980):
    """ Generate a random array with a fixed seed.
    """
    old_seed = np.random.seed()
    np.random.seed(seed)
    out = np.random.rand(size)
    np.random.seed(old_seed)
    return out
def const_shuffle(arr, seed=23980):
    """ Shuffle an array in-place with a fixed seed.
    """
    old_seed = np.random.seed()
    np.random.seed(seed)
    np.random.shuffle(arr)
    np.random.seed(old_seed)
def strip_list(attrs):
    """ This assumes that if you have a list of outputs you just want the second one (the second class is the '1' class).
    """
    if isinstance(attrs, list):
        return attrs[1]
    else:
        return attrs
```

Surface stats: avg_line_length 43.767059, max_line_length 132, alphanum_fraction 0.692812

Quality signals: num_words 2,974, num_chars 18,601, mean_word_length 4.101547, frac_words_unique 0.096839, frac_chars_top_2grams 0.04099, frac_chars_top_3grams 0.038367, frac_chars_top_4grams 0.021643, frac_chars_dupe_5grams 0.895147, frac_chars_dupe_6grams 0.87334, frac_chars_dupe_7grams 0.848418, frac_chars_dupe_8grams 0.833005, frac_chars_dupe_9grams 0.828414, frac_chars_dupe_10grams 0.816445, frac_chars_digital 0.007495, frac_chars_whitespace 0.21101, size_file_byte 18,601, num_lines 424, num_chars_line_max 133, num_chars_line_mean 43.870283, frac_chars_alphabet 0.823658, frac_chars_comments 0.296758, frac_lines_dupe_lines 0.676596, frac_chars_string_length 0.040918, frac_chars_long_word_length 0.002343, frac_lines_assert 0.046809, py:cate_ast 1, py:frac_lines_func_ratio 0.06383, py:cate_var_zero false, py:frac_lines_import 0.021277, py:frac_lines_simplefunc 0.004255, py:score_lines_no_logic 0.148936

Filter flags set: frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_9grams, frac_chars_dupe_10grams

effective "0", hits 6
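Every row in this excerpt ends with effective "0" and hits 6, so the dump plausibly comes from one side of a quality filter; that reading is an inference from this excerpt, not a documented fact. Continuing from the loading sketch after the schema, the flag columns can drive simple filters, for example dropping files that tripped any duplicate-n-gram flag:

```python
# Continues the loading sketch: df and flag_cols as defined there.
dupe_flags = [c for c in flag_cols if "_dupe_" in c]

# Keep files where no duplicate-n-gram flag fired (nulls count as 0).
mask = (df[dupe_flags].fillna(0) == 0).all(axis=1)
clean = df[mask]
print(f"kept {len(clean)} of {len(df)} files")
```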
---

**hexsha:** `d9f0e671999bca27f60afc3f2a6697179b56439d` (size 26,542, ext py, lang Python)

| group | path | repo | head hexsha | licenses | count | events (min → max) |
|---|---|---|---|---|---|---|
| max_stars | `ironic-plugin-pike/ironic/tests/unit/drivers/modules/test_console_utils.py` | `saintifly/Server_Manage_Plugin` | `ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9` | ["Apache-2.0"] | null | null → null |
| max_issues | `ironic-plugin-pike/ironic/tests/unit/drivers/modules/test_console_utils.py` | `saintifly/Server_Manage_Plugin` | `ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9` | ["Apache-2.0"] | null | null → null |
| max_forks | `ironic-plugin-pike/ironic/tests/unit/drivers/modules/test_console_utils.py` | `saintifly/Server_Manage_Plugin` | `ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9` | ["Apache-2.0"] | 1 | 2019-01-11T16:00:23.000Z → 2019-01-11T16:00:23.000Z |

content (indentation restored; the excerpt breaks off mid-file and the record's remaining fields are not present):

```python
# coding=utf-8
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for console_utils driver module."""
import errno
import os
import random
import signal
import string
import subprocess
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_service import loopingcall
from oslo_utils import netutils
import psutil
from ironic.common import exception
from ironic.drivers.modules import console_utils
from ironic.drivers.modules import ipmitool as ipmi
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
INFO_DICT = db_utils.get_test_ipmi_info()
class ConsoleUtilsTestCase(db_base.DbTestCase):
    def setUp(self):
        super(ConsoleUtilsTestCase, self).setUp()
        self.node = obj_utils.get_test_node(
            self.context,
            driver='fake_ipmitool',
            driver_info=INFO_DICT)
        self.info = ipmi._parse_driver_info(self.node)
    def test__get_console_pid_dir(self):
        pid_dir = '/tmp/pid_dir'
        self.config(terminal_pid_dir=pid_dir, group='console')
        dir = console_utils._get_console_pid_dir()
        self.assertEqual(pid_dir, dir)
    def test__get_console_pid_dir_tempdir(self):
        self.config(tempdir='/tmp/fake_dir')
        dir = console_utils._get_console_pid_dir()
        self.assertEqual(CONF.tempdir, dir)
    @mock.patch.object(os, 'makedirs', autospec=True)
    @mock.patch.object(os.path, 'exists', autospec=True)
    def test__ensure_console_pid_dir_exists(self, mock_path_exists,
                                            mock_makedirs):
        mock_path_exists.return_value = True
        mock_makedirs.side_effect = OSError
        pid_dir = console_utils._get_console_pid_dir()
        console_utils._ensure_console_pid_dir_exists()
        mock_path_exists.assert_called_once_with(pid_dir)
        self.assertFalse(mock_makedirs.called)
    @mock.patch.object(os, 'makedirs', autospec=True)
    @mock.patch.object(os.path, 'exists', autospec=True)
    def test__ensure_console_pid_dir_exists_fail(self, mock_path_exists,
                                                 mock_makedirs):
        mock_path_exists.return_value = False
        mock_makedirs.side_effect = OSError
        pid_dir = console_utils._get_console_pid_dir()
        self.assertRaises(exception.ConsoleError,
                          console_utils._ensure_console_pid_dir_exists)
        mock_path_exists.assert_called_once_with(pid_dir)
        mock_makedirs.assert_called_once_with(pid_dir)
    @mock.patch.object(console_utils, '_get_console_pid_dir', autospec=True)
    def test__get_console_pid_file(self, mock_dir):
        mock_dir.return_value = tempfile.gettempdir()
        expected_path = '%(tempdir)s/%(uuid)s.pid' % {
            'tempdir': mock_dir.return_value,
            'uuid': self.info['uuid']}
        path = console_utils._get_console_pid_file(self.info['uuid'])
        self.assertEqual(expected_path, path)
        mock_dir.assert_called_once_with()
    @mock.patch.object(console_utils, 'open',
                       mock.mock_open(read_data='12345\n'))
    @mock.patch.object(console_utils, '_get_console_pid_file', autospec=True)
    def test__get_console_pid(self, mock_pid_file):
        tmp_file_handle = tempfile.NamedTemporaryFile()
        tmp_file = tmp_file_handle.name
        mock_pid_file.return_value = tmp_file
        pid = console_utils._get_console_pid(self.info['uuid'])
        mock_pid_file.assert_called_once_with(self.info['uuid'])
        self.assertEqual(pid, 12345)
    @mock.patch.object(console_utils, 'open',
                       mock.mock_open(read_data='Hello World\n'))
    @mock.patch.object(console_utils, '_get_console_pid_file', autospec=True)
    def test__get_console_pid_not_a_num(self, mock_pid_file):
        tmp_file_handle = tempfile.NamedTemporaryFile()
        tmp_file = tmp_file_handle.name
        mock_pid_file.return_value = tmp_file
        self.assertRaises(exception.NoConsolePid,
                          console_utils._get_console_pid,
                          self.info['uuid'])
        mock_pid_file.assert_called_once_with(self.info['uuid'])
    def test__get_console_pid_file_not_found(self):
        self.assertRaises(exception.NoConsolePid,
                          console_utils._get_console_pid,
                          self.info['uuid'])
    @mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(os, 'kill', autospec=True)
    @mock.patch.object(console_utils, '_get_console_pid', autospec=True)
    def test__stop_console(self, mock_pid, mock_kill, mock_unlink):
        pid_file = console_utils._get_console_pid_file(self.info['uuid'])
        mock_pid.return_value = 12345
        console_utils._stop_console(self.info['uuid'])
        mock_pid.assert_called_once_with(self.info['uuid'])
        mock_kill.assert_called_once_with(mock_pid.return_value,
                                          signal.SIGTERM)
        mock_unlink.assert_called_once_with(pid_file)
    @mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(os, 'kill', autospec=True)
    @mock.patch.object(console_utils, '_get_console_pid', autospec=True)
    def test__stop_console_nopid(self, mock_pid, mock_kill, mock_unlink):
        pid_file = console_utils._get_console_pid_file(self.info['uuid'])
        mock_pid.side_effect = exception.NoConsolePid(pid_path="/tmp/blah")
        self.assertRaises(exception.NoConsolePid,
                          console_utils._stop_console,
                          self.info['uuid'])
        mock_pid.assert_called_once_with(self.info['uuid'])
        self.assertFalse(mock_kill.called)
        mock_unlink.assert_called_once_with(pid_file)
    @mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(os, 'kill', autospec=True)
    @mock.patch.object(console_utils, '_get_console_pid', autospec=True)
    def test__stop_console_shellinabox_not_running(self, mock_pid,
                                                   mock_kill, mock_unlink):
        pid_file = console_utils._get_console_pid_file(self.info['uuid'])
        mock_pid.return_value = 12345
        mock_kill.side_effect = OSError(errno.ESRCH, 'message')
        console_utils._stop_console(self.info['uuid'])
        mock_pid.assert_called_once_with(self.info['uuid'])
        mock_kill.assert_called_once_with(mock_pid.return_value,
                                          signal.SIGTERM)
        mock_unlink.assert_called_once_with(pid_file)
    @mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(os, 'kill', autospec=True)
    @mock.patch.object(console_utils, '_get_console_pid', autospec=True)
    def test__stop_console_exception(self, mock_pid, mock_kill, mock_unlink):
        pid_file = console_utils._get_console_pid_file(self.info['uuid'])
        mock_pid.return_value = 12345
        mock_kill.side_effect = OSError(2, 'message')
        self.assertRaises(exception.ConsoleError,
                          console_utils._stop_console,
                          self.info['uuid'])
        mock_pid.assert_called_once_with(self.info['uuid'])
        mock_kill.assert_called_once_with(mock_pid.return_value,
                                          signal.SIGTERM)
        mock_unlink.assert_called_once_with(pid_file)
    def _get_shellinabox_console(self, scheme):
        generated_url = (
            console_utils.get_shellinabox_console_url(self.info['port']))
        console_host = CONF.my_ip
        if netutils.is_valid_ipv6(console_host):
            console_host = '[%s]' % console_host
        http_url = "%s://%s:%s" % (scheme, console_host, self.info['port'])
        self.assertEqual(http_url, generated_url)
    def test_get_shellinabox_console_url(self):
        self._get_shellinabox_console('http')
    def test_get_shellinabox_console_https_url(self):
        # specify terminal_cert_dir in /etc/ironic/ironic.conf
        self.config(terminal_cert_dir='/tmp', group='console')
        # use https
        self._get_shellinabox_console('https')
    def test_make_persistent_password_file(self):
        filepath = '%(tempdir)s/%(node_uuid)s' % {
            'tempdir': tempfile.gettempdir(),
            'node_uuid': self.info['uuid']}
        password = ''.join([random.choice(string.ascii_letters)
                            for n in range(16)])
        console_utils.make_persistent_password_file(filepath, password)
        # make sure file exists
        self.assertTrue(os.path.exists(filepath))
        # make sure the content is correct
        with open(filepath) as file:
            content = file.read()
        self.assertEqual(password, content)
        # delete the file
        os.unlink(filepath)
    @mock.patch.object(os, 'chmod', autospec=True)
    def test_make_persistent_password_file_fail(self, mock_chmod):
        mock_chmod.side_effect = IOError()
        filepath = '%(tempdir)s/%(node_uuid)s' % {
            'tempdir': tempfile.gettempdir(),
            'node_uuid': self.info['uuid']}
        self.assertRaises(exception.PasswordFileFailedToCreate,
                          console_utils.make_persistent_password_file,
                          filepath,
                          'password')
    @mock.patch.object(console_utils, 'open',
                       mock.mock_open(read_data='12345\n'))
    @mock.patch.object(os.path, 'exists', autospec=True)
    @mock.patch.object(subprocess, 'Popen', autospec=True)
    @mock.patch.object(psutil, 'pid_exists', autospec=True)
    @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
                       autospec=True)
    @mock.patch.object(console_utils, '_stop_console', autospec=True)
    def test_start_shellinabox_console(self, mock_stop,
                                       mock_dir_exists,
                                       mock_pid_exists,
                                       mock_popen,
                                       mock_path_exists):
        mock_popen.return_value.poll.return_value = 0
        mock_pid_exists.return_value = True
        mock_path_exists.return_value = True
        console_utils.start_shellinabox_console(self.info['uuid'],
                                                self.info['port'],
                                                'ls&')
        mock_stop.assert_called_once_with(self.info['uuid'])
        mock_dir_exists.assert_called_once_with()
        mock_pid_exists.assert_called_once_with(12345)
        mock_popen.assert_called_once_with(mock.ANY,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        mock_popen.return_value.poll.assert_called_once_with()
    @mock.patch.object(console_utils, 'open',
                       mock.mock_open(read_data='12345\n'))
    @mock.patch.object(os.path, 'exists', autospec=True)
    @mock.patch.object(subprocess, 'Popen', autospec=True)
    @mock.patch.object(psutil, 'pid_exists', autospec=True)
    @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
                       autospec=True)
    @mock.patch.object(console_utils, '_stop_console', autospec=True)
    def test_start_shellinabox_console_nopid(self, mock_stop,
                                             mock_dir_exists,
                                             mock_pid_exists,
                                             mock_popen,
                                             mock_path_exists):
        # no existing PID file before starting
        mock_stop.side_effect = exception.NoConsolePid('/tmp/blah')
        mock_popen.return_value.poll.return_value = 0
        mock_pid_exists.return_value = True
        mock_path_exists.return_value = True
        console_utils.start_shellinabox_console(self.info['uuid'],
                                                self.info['port'],
                                                'ls&')
        mock_stop.assert_called_once_with(self.info['uuid'])
        mock_dir_exists.assert_called_once_with()
        mock_pid_exists.assert_called_once_with(12345)
        mock_popen.assert_called_once_with(mock.ANY,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        mock_popen.return_value.poll.assert_called_once_with()
    @mock.patch.object(subprocess, 'Popen', autospec=True)
    @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
                       autospec=True)
    @mock.patch.object(console_utils, '_stop_console', autospec=True)
    def test_start_shellinabox_console_fail(self, mock_stop, mock_dir_exists,
                                            mock_popen):
        mock_popen.return_value.poll.return_value = 1
        mock_popen.return_value.communicate.return_value = ('output', 'error')
        self.assertRaises(exception.ConsoleSubprocessFailed,
                          console_utils.start_shellinabox_console,
                          self.info['uuid'],
                          self.info['port'],
                          'ls&')
        mock_stop.assert_called_once_with(self.info['uuid'])
        mock_dir_exists.assert_called_once_with()
        mock_popen.assert_called_once_with(mock.ANY,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        mock_popen.return_value.poll.assert_called_once_with()
    @mock.patch.object(console_utils, 'open',
                       mock.mock_open(read_data='12345\n'))
    @mock.patch.object(os.path, 'exists', autospec=True)
    @mock.patch.object(subprocess, 'Popen', autospec=True)
    @mock.patch.object(psutil, 'pid_exists', autospec=True)
    @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
```
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_shellinabox_console_fail_no_pid(self, mock_stop,
mock_dir_exists,
mock_pid_exists,
mock_popen,
mock_path_exists):
mock_popen.return_value.poll.return_value = 0
mock_pid_exists.return_value = False
mock_popen.return_value.communicate.return_value = ('output', 'error')
mock_path_exists.return_value = True
self.assertRaises(exception.ConsoleSubprocessFailed,
console_utils.start_shellinabox_console,
self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_pid_exists.assert_called_once_with(12345)
mock_popen.assert_called_once_with(mock.ANY,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
mock_popen.return_value.poll.assert_called_once_with()
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_shellinabox_console_fail_nopiddir(self, mock_stop,
mock_dir_exists,
mock_popen):
mock_dir_exists.side_effect = exception.ConsoleError(message='fail')
mock_popen.return_value.poll.return_value = 0
self.assertRaises(exception.ConsoleError,
console_utils.start_shellinabox_console,
self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
self.assertFalse(mock_popen.called)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_stop_shellinabox_console(self, mock_stop):
console_utils.stop_shellinabox_console(self.info['uuid'])
mock_stop.assert_called_once_with(self.info['uuid'])
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_stop_shellinabox_console_fail_nopid(self, mock_stop):
mock_stop.side_effect = exception.NoConsolePid('/tmp/blah')
console_utils.stop_shellinabox_console(self.info['uuid'])
mock_stop.assert_called_once_with(self.info['uuid'])
def test_get_socat_console_url_tcp(self):
self.config(my_ip="10.0.0.1")
url = console_utils.get_socat_console_url(self.info['port'])
self.assertEqual("tcp://10.0.0.1:%s" % self.info['port'], url)
def test_get_socat_console_url_tcp6(self):
self.config(my_ip='::1')
url = console_utils.get_socat_console_url(self.info['port'])
self.assertEqual("tcp://[::1]:%s" % self.info['port'], url)
def test_get_socat_console_url_tcp_with_address_conf(self):
self.config(socat_address="10.0.0.1", group='console')
url = console_utils.get_socat_console_url(self.info['port'])
self.assertEqual("tcp://10.0.0.1:%s" % self.info['port'], url)
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(console_utils, '_get_console_pid_file', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
@mock.patch.object(loopingcall.FixedIntervalLoopingCall, 'start',
autospec=True)
def _test_start_socat_console_check_arg(self, mock_timer_start,
mock_stop, mock_dir_exists,
mock_get_pid, mock_popen):
mock_timer_start.return_value = mock.Mock()
mock_get_pid.return_value = '/tmp/%s.pid' % self.info['uuid']
console_utils.start_socat_console(self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_get_pid.assert_called_once_with(self.info['uuid'])
mock_timer_start.assert_called_once_with(mock.ANY, interval=mock.ANY)
mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE)
return mock_popen.call_args[0][0]
def test_start_socat_console_check_arg_default_timeout(self):
args = self._test_start_socat_console_check_arg()
self.assertIn('-T600', args)
def test_start_socat_console_check_arg_timeout(self):
self.config(terminal_timeout=1, group='console')
args = self._test_start_socat_console_check_arg()
self.assertIn('-T1', args)
def test_start_socat_console_check_arg_timeout_disabled(self):
self.config(terminal_timeout=0, group='console')
args = self._test_start_socat_console_check_arg()
self.assertNotIn('-T0', args)
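    def _example_socat_timeout_args(self, terminal_timeout):
        # Hedged sketch (hypothetical helper, not the real console_utils
        # logic): the three timeout tests above imply the socat command is
        # assembled roughly like this -- '-T<seconds>' is appended only for
        # a positive terminal_timeout, so a value of 0 disables the idle
        # timeout entirely, matching the assertIn/assertNotIn checks.
        args = ['socat']
        if terminal_timeout > 0:
            args.append('-T%s' % terminal_timeout)
        return args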
def test_start_socat_console_check_arg_bind_addr_default_ipv4(self):
self.config(my_ip='10.0.0.1')
args = self._test_start_socat_console_check_arg()
self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr' %
self.info['port'], args)
def test_start_socat_console_check_arg_bind_addr_ipv4(self):
self.config(socat_address='10.0.0.1', group='console')
args = self._test_start_socat_console_check_arg()
self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr' %
self.info['port'], args)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(psutil, 'pid_exists', autospec=True)
@mock.patch.object(console_utils, '_get_console_pid', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_socat_console(self, mock_stop,
mock_dir_exists,
mock_get_pid,
mock_pid_exists,
mock_popen,
mock_path_exists):
mock_popen.return_value.pid = 23456
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.communicate.return_value = (None, None)
mock_get_pid.return_value = 23456
mock_path_exists.return_value = True
console_utils.start_socat_console(self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_get_pid.assert_called_with(self.info['uuid'])
mock_path_exists.assert_called_with(mock.ANY)
mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(psutil, 'pid_exists', autospec=True)
@mock.patch.object(console_utils, '_get_console_pid', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_socat_console_nopid(self, mock_stop,
mock_dir_exists,
mock_get_pid,
mock_pid_exists,
mock_popen,
mock_path_exists):
# no existing PID file before starting
mock_stop.side_effect = exception.NoConsolePid('/tmp/blah')
mock_popen.return_value.pid = 23456
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.communicate.return_value = (None, None)
mock_get_pid.return_value = 23456
mock_path_exists.return_value = True
console_utils.start_socat_console(self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_get_pid.assert_called_with(self.info['uuid'])
mock_path_exists.assert_called_with(mock.ANY)
mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_socat_console_fail(self, mock_stop, mock_dir_exists,
mock_popen):
mock_popen.side_effect = OSError()
mock_popen.return_value.pid = 23456
mock_popen.return_value.poll.return_value = 1
mock_popen.return_value.communicate.return_value = (None, 'error')
self.assertRaises(exception.ConsoleSubprocessFailed,
console_utils.start_socat_console,
self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen', autospec=True)
@mock.patch.object(console_utils, '_ensure_console_pid_dir_exists',
autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_start_socat_console_fail_nopiddir(self, mock_stop,
mock_dir_exists,
mock_popen):
mock_dir_exists.side_effect = exception.ConsoleError(message='fail')
self.assertRaises(exception.ConsoleError,
console_utils.start_socat_console,
self.info['uuid'],
self.info['port'],
'ls&')
mock_stop.assert_called_once_with(self.info['uuid'])
mock_dir_exists.assert_called_once_with()
mock_popen.assert_not_called()
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_stop_socat_console(self, mock_stop):
console_utils.stop_socat_console(self.info['uuid'])
mock_stop.assert_called_once_with(self.info['uuid'])
@mock.patch.object(console_utils.LOG, 'warning', autospec=True)
@mock.patch.object(console_utils, '_stop_console', autospec=True)
def test_stop_socat_console_fail_nopid(self, mock_stop, mock_log_warning):
mock_stop.side_effect = exception.NoConsolePid('/tmp/blah')
console_utils.stop_socat_console(self.info['uuid'])
mock_stop.assert_called_once_with(self.info['uuid'])
# LOG.warning() is called when _stop_console() raises NoConsolePid
self.assertTrue(mock_log_warning.called)
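# A minimal standalone sketch (hypothetical helper, not part of ironic's
# console_utils) of the URL construction that _get_shellinabox_console
# exercises above: IPv6 literals must be bracketed to keep the URL valid,
# while hostnames and IPv4 addresses pass through unchanged.
import ipaddress
def _example_build_console_url(scheme, host, port):
    try:
        if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address):
            host = '[%s]' % host
    except ValueError:
        pass  # a hostname rather than an IP literal
    return '%s://%s:%s' % (scheme, host, port)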
| 46.16 | 78 | 0.634315 | 3,159 | 26,542 | 4.959797 | 0.081988 | 0.061271 | 0.070845 | 0.074036 | 0.828759 | 0.806676 | 0.784082 | 0.764488 | 0.748341 | 0.723002 | 0 | 0.007986 | 0.268744 | 26,542 | 574 | 79 | 46.240418 | 0.799268 | 0.036169 | 0 | 0.684902 | 0 | 0 | 0.0713 | 0.020075 | 0 | 0 | 0 | 0 | 0.203501 | 1 | 0.087527 | false | 0.017505 | 0.041575 | 0 | 0.133479 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8a31452d04c8ae9e47bed68e0e826fb663e642ef | 11,408 | py | Python | serial_scripts/mvpn/test_mvpn.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | null | null | null | serial_scripts/mvpn/test_mvpn.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | serial_scripts/mvpn/test_mvpn.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | null | null | null |
from tcutils.wrappers import preposttest_wrapper
from compute_node_test import ComputeNodeFixture
import test
from common.mvpn.base import *
from tcutils.traffic_utils.scapy_traffic_gen import ScapyTraffic
from tcutils.traffic_utils.traffic_analyzer import TrafficAnalyzer
class TestMVPNSingleVNSingleCompute(MVPNTestSingleVNSingleComputeBase):
@classmethod
def setUpClass(cls):
super(TestMVPNSingleVNSingleCompute, cls).setUpClass()
# end setUpClass
@classmethod
def tearDownClass(cls):
super(TestMVPNSingleVNSingleCompute, cls).tearDownClass()
# end tearDownClass
@test.attr(type=['sanity'])
@preposttest_wrapper
def test_mvpn_single_vn_within_compute(self):
        '''
        Test MVPN functionality when the multicast source and
        receivers are part of a single VN and also hosted on the
        same compute.
        '''
# Bringup MVPN setup
ret_dict = self.bringup_mvpn_setup()
vm_fixtures = ret_dict['vm_fixtures']
# Verify MVPN Type-1 routes
route_type = 1
result = self.verify_mvpn_routes(route_type)
# IGMP Join parameter details
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 1, # Record type: INCLUDE
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
        # Multicast traffic details
        # The IGMPv3 join is sent from vm2, not from vm3, so that when the
        # multicast source vm1 starts sending data traffic, only vm2 should
        # receive it, not vm3.
traffic = {'stream1': {'src':'vm1', # Multicast source
'rcvrs': ['vm2'], # Multicast receivers
                               'non_rcvrs': ['vm3'], # Non-multicast receivers
'maddr':'239.1.1.1', # Multicast group address
'count':10 # Num of packets
}
}
# Send and verify IGMP reports and multicast data traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# IGMP Leave parameter details
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 6, # Record type: BLOCK_OLD_SOURCES
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# end test_mvpn_single_vn_within_compute
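# Hedged illustration (assumes scapy's contrib IGMPv3 layers, which the
# imported ScapyTraffic helper presumably wraps; field names deliberately
# mirror the 'igmp' dicts above): builds the IGMPv3 membership report
# these tests describe -- type 0x22 with a single group record.
def _example_igmpv3_report(igmp):
    from scapy.all import IP
    from scapy.contrib.igmpv3 import IGMPv3, IGMPv3gr, IGMPv3mr
    rec = igmp['record1']
    grp = IGMPv3gr(rtype=rec['rtype'], maddr=rec['maddr'],
                   srcaddrs=rec['srcaddrs'])
    # 224.0.0.22 is the IGMPv3-capable routers address reports are sent to
    return (IP(dst='224.0.0.22') / IGMPv3(type=igmp['type']) /
            IGMPv3mr(numgrp=igmp['numgrp'], records=[grp]))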
class TestMVPNSingleVNMultiCompute(MVPNTestSingleVNMultiComputeBase):
@classmethod
def setUpClass(cls):
super(TestMVPNSingleVNMultiCompute, cls).setUpClass()
# end setUpClass
@classmethod
def tearDownClass(cls):
super(TestMVPNSingleVNMultiCompute, cls).tearDownClass()
# end tearDownClass
@preposttest_wrapper
def test_mvpn_single_vn_multi_compute(self):
        '''
        Test MVPN functionality when the multicast source and
        receivers are part of a single VN, but the source and
        receivers are hosted on different computes.
        '''
# Bringup MVPN setup
ret_dict = self.bringup_mvpn_setup()
vm_fixtures = ret_dict['vm_fixtures']
# Verify MVPN Type-1 routes
route_type = 1
result = self.verify_mvpn_routes(route_type)
# IGMP Join parameter details
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 1, # Record type: INCLUDE
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
        # Multicast traffic details
        # The IGMPv3 join is sent from vm3, not from vm2 or vm4, so that when
        # the multicast source vm1 starts sending data traffic, only vm3
        # should receive it, not vm2 or vm4.
traffic = {'stream1':{'src':'vm1', # Multicast source
'rcvrs': ['vm3'], # Multicast receivers
'non_rcvrs': ['vm2','vm4'], # Non Multicast receivers
'maddr': '239.1.1.1', # Multicast group address
'count':10 # Num of packets
}
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# IGMP Leave parameter details
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 6, # Record type: BLOCK_OLD_SOURCES
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# end test_mvpn_single_vn_multi_compute
class TestMVPNMultiVNSingleCompute(MVPNTestMultiVNSingleComputeBase):
@classmethod
def setUpClass(cls):
super(TestMVPNMultiVNSingleCompute, cls).setUpClass()
# end setUpClass
@classmethod
def tearDownClass(cls):
super(TestMVPNMultiVNSingleCompute, cls).tearDownClass()
# end tearDownClass
@preposttest_wrapper
def test_mvpn_multi_vn_single_compute(self):
        '''
        Test MVPN functionality when the multicast source and
        receivers are spread across multiple VNs, with all of them
        hosted on the same compute.
        '''
# Bringup MVPN setup
ret_dict = self.bringup_mvpn_setup()
vm_fixtures = ret_dict['vm_fixtures']
# Verify MVPN Type-1 routes
route_type = 1
result = self.verify_mvpn_routes(route_type)
# IGMP Join parameters
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 1, # Record type: INCLUDE
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
        # Multicast traffic details
        # The IGMPv3 join is sent from vm2 and vm3, not from vm4, so that
        # when the multicast source vm1 starts sending data traffic, only
        # vm2 and vm3 should receive it, not vm4.
traffic = {'stream1': {'src':'vm1', # Multicast source
'rcvrs': ['vm2','vm3'], # Multicast receivers
'non_rcvrs': ['vm4'], # Non Multicast receivers
'maddr': '239.1.1.1', # Multicast group address
'count':10 # Num of packets
}
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# IGMP Leave parameters
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 6, # Record type: BLOCK_OLD_SOURCES
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# end test_mvpn_multi_vn_single_compute
class TestMVPNMultiVNMultiCompute(MVPNTestMultiVNMultiComputeBase):
@classmethod
def setUpClass(cls):
super(TestMVPNMultiVNMultiCompute, cls).setUpClass()
# end setUpClass
@classmethod
def tearDownClass(cls):
super(TestMVPNMultiVNMultiCompute, cls).tearDownClass()
# end tearDownClass
@preposttest_wrapper
def test_mvpn_multi_vn_multi_compute(self):
        '''
        Test MVPN functionality when the multicast source and
        receivers are spread across multiple VNs, with the source and
        receivers hosted on different computes.
        '''
# Bringup MVPN setup
ret_dict = self.bringup_mvpn_setup()
vm_fixtures = ret_dict['vm_fixtures']
# Verify MVPN Type-1 routes
route_type = 1
result = self.verify_mvpn_routes(route_type)
# IGMP Join parameters
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 1, # Record type: INCLUDE
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
        # Multicast traffic details
        # The IGMPv3 join is sent from vm2 and vm3, not from vm4, so that
        # when the multicast source vm1 starts sending data traffic, only
        # vm2 and vm3 should receive it, not vm4.
traffic = {'stream1': {'src':'vm1', # Multicast source
'rcvrs': ['vm2', 'vm3'], # Multicast receivers
'non_rcvrs': ['vm4'], # Non Multicast receivers
'maddr': '239.1.1.1', # Multicast group address
'count':10 # Num of packets
}
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# IGMP Leave parameters
igmp = {'type': 0x22, # IGMPv3 Report
'numgrp': 1, # Number of group records
'record1': {
                'rtype': 6, # Record type: BLOCK_OLD_SOURCES
'maddr': '239.1.1.1', # Multicast group address
'srcaddrs': ['30.30.30.1'] # List of multicast source addresses
},
}
# Send and verify IGMP reports and multicast traffic
result = self.send_verify_mcast(vm_fixtures, traffic, igmp)
# end test_mvpn_multi_vn_multi_compute
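# The four tests above repeat the same join/leave dictionaries; a small
# factory like this hypothetical one captures the single varying field,
# the group-record type (1 = INCLUDE join, 6 = BLOCK_OLD_SOURCES leave).
def _example_make_igmp(rtype, maddr='239.1.1.1', srcaddrs=('30.30.30.1',)):
    return {'type': 0x22, 'numgrp': 1,
            'record1': {'rtype': rtype, 'maddr': maddr,
                        'srcaddrs': list(srcaddrs)}}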
| 39.611111 | 85 | 0.533398 | 1,118 | 11,408 | 5.33542 | 0.11449 | 0.008047 | 0.018106 | 0.020117 | 0.852808 | 0.826488 | 0.811065 | 0.798324 | 0.783906 | 0.732775 | 0 | 0.035622 | 0.387272 | 11,408 | 287 | 86 | 39.749129 | 0.81774 | 0.332486 | 0 | 0.679245 | 0 | 0 | 0.095793 | 0 | 0 | 0 | 0.004385 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.037736 | 0 | 0.138365 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8abc27f0b8e16027f7e9a6f931266c08f066b6cf | 220 | py | Python | oscar/lib/python2.7/site-packages/django_extensions/utils/deprecation.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django_extensions/utils/deprecation.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django_extensions/utils/deprecation.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.utils.deprecation import RemovedInNextVersionWarning
class MarkedForDeprecationWarning(RemovedInNextVersionWarning):
pass
| 24.444444 | 65 | 0.759091 | 18 | 220 | 9 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005525 | 0.177273 | 220 | 8 | 66 | 27.5 | 0.889503 | 0.095455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
0a0d09a0b4eb4f56c25030174230ea51ea0306f6 | 48 | py | Python | tests/test_views.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | ["MIT"] | null | null | null | tests/test_views.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | ["MIT"] | null | null | null | tests/test_views.py | codezeus/django-helpers | a28cc19e32cf41130e848c268d26c1858a7cf26a | ["MIT"] | null | null | null |
import pytest
from django_toolset import views
| 12 | 32 | 0.854167 | 7 | 48 | 5.714286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145833 | 48 | 3 | 33 | 16 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0a27c814c08422d35bbb1097b53a775f175d023c | 202 | py | Python | tests/services/__init__.py | Bystroushaak/bottle-gui | 5135a87e7f0be8e36c1fc8663f46d1dbe6e89a2a | ["MIT"] | 3 | 2015-01-03T22:10:33.000Z | 2015-01-04T16:48:45.000Z | tests/services/__init__.py | Bystroushaak/bottle-gui | 5135a87e7f0be8e36c1fc8663f46d1dbe6e89a2a | ["MIT"] | 4 | 2015-01-03T18:44:34.000Z | 2020-09-26T08:02:18.000Z | tests/services/__init__.py | Bystroushaak/bottle-gui | 5135a87e7f0be8e36c1fc8663f46d1dbe6e89a2a | ["MIT"] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from hist import *
from xex import *
| 22.444444 | 79 | 0.420792 | 19 | 202 | 4.473684 | 0.842105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017045 | 0.128713 | 202 | 8 | 80 | 25.25 | 0.465909 | 0.757426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0a2a0398b1e8d96c44f4834cbda1121594c86452 | 43 | py | Python | code/ch5-viewmodels/services/user_service.py | mtgeekman/web-applications-with-fastapi-course | 0ec278583542360fc6aaef7db5372a827e95deb8 | ["MIT"] | 225 | 2020-12-31T08:30:08.000Z | 2022-03-30T14:14:47.000Z | code/ch5-viewmodels/services/user_service.py | mtgeekman/web-applications-with-fastapi-course | 0ec278583542360fc6aaef7db5372a827e95deb8 | ["MIT"] | 10 | 2021-02-09T01:28:53.000Z | 2022-02-25T19:03:49.000Z | code/ch5-viewmodels/services/user_service.py | mtgeekman/web-applications-with-fastapi-course | 0ec278583542360fc6aaef7db5372a827e95deb8 | ["MIT"] | 145 | 2021-02-06T09:31:46.000Z | 2022-03-26T19:18:20.000Z |
def user_count() -> int:
return 73_874
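# Note: 73_874 uses PEP 515 digit separators and is the plain int 73874 --
# presumably a hard-coded stand-in until a real data store backs this
# view-model service.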
| 14.333333 | 24 | 0.651163 | 7 | 43 | 3.714286 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151515 | 0.232558 | 43 | 2 | 25 | 21.5 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
0a4103067d07eac658207762e0d7bfaf098f7c9a | 2,280 | py | Python | torch_geometric/graphgym/models/pooling.py | JinheonBaek/pytorch_geometric | dfd32d08a3d8191d6290e53458d4eda515d04fd6 | ["MIT"] | 4 | 2021-05-03T20:22:34.000Z | 2021-12-11T03:19:07.000Z | torch_geometric/graphgym/models/pooling.py | JinheonBaek/pytorch_geometric | dfd32d08a3d8191d6290e53458d4eda515d04fd6 | ["MIT"] | 1 | 2021-09-10T06:36:13.000Z | 2021-10-06T14:20:16.000Z | torch_geometric/graphgym/models/pooling.py | JinheonBaek/pytorch_geometric | dfd32d08a3d8191d6290e53458d4eda515d04fd6 | ["MIT"] | 2 | 2021-07-10T10:16:43.000Z | 2021-11-04T07:36:55.000Z |
from torch_scatter import scatter
import torch_geometric.graphgym.register as register
def global_add_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise sum.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='add')
def global_mean_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise mean.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='mean')
def global_max_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise max.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='max')
pooling_dict = {
'add': global_add_pool,
'mean': global_mean_pool,
'max': global_max_pool
}
pooling_dict = {**register.pooling_dict, **pooling_dict}
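# Hedged usage sketch (assumes torch is installed): pooling a toy batch
# in which nodes 0 and 1 belong to graph 0 and node 2 to graph 1.
if __name__ == '__main__':
    import torch
    x = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
    batch = torch.tensor([0, 0, 1])
    print(global_add_pool(x, batch))   # tensor([[4., 6.], [5., 6.]])
    print(global_mean_pool(x, batch))  # tensor([[2., 3.], [5., 6.]])
    print(global_max_pool(x, batch))   # tensor([[3., 4.], [5., 6.]])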
| 32.571429 | 78 | 0.693421 | 324 | 2,280 | 4.777778 | 0.203704 | 0.023256 | 0.027132 | 0.027132 | 0.848837 | 0.848837 | 0.848837 | 0.848837 | 0.848837 | 0.848837 | 0 | 0.003382 | 0.22193 | 2,280 | 69 | 79 | 33.043478 | 0.869222 | 0.596053 | 0 | 0.176471 | 0 | 0 | 0.025773 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0a535596ab266b0ea3b838b230aa3ae14a01461a | 25 | py | Python | clibs/tess2/__init__.py | filonik/clibs | d060d396515d1d4ba5a94cd5a10a6d728e42c295 | ["MIT"] | null | null | null | clibs/tess2/__init__.py | filonik/clibs | d060d396515d1d4ba5a94cd5a10a6d728e42c295 | ["MIT"] | null | null | null | clibs/tess2/__init__.py | filonik/clibs | d060d396515d1d4ba5a94cd5a10a6d728e42c295 | ["MIT"] | null | null | null |
from .tesselator import *
| 25 | 25 | 0.8 | 3 | 25 | 6.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12 | 25 | 1 | 25 | 25 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0a6044b98521f2d3a0816b5d00e7cd24bf62aacb | 8,339 | py | Python | flamio/user.py | Jkreid/flamio | c7d98b7e39f0a8e5792a236e9508632d294525b2 | ["MIT"] | null | null | null | flamio/user.py | Jkreid/flamio | c7d98b7e39f0a8e5792a236e9508632d294525b2 | ["MIT"] | 11 | 2020-05-29T18:14:58.000Z | 2021-07-21T02:41:14.000Z | flamio/user.py | Jkreid/flamio | c7d98b7e39f0a8e5792a236e9508632d294525b2 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 17 11:31:40 2021
@author: justi
"""
from abc import ABC, abstractmethod
import flamio
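# flamio_method (below) brackets every public operation with the user's
# pre_method/aft_method hooks, so concrete subclasses can refresh state
# before a call and persist it afterwards.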
def flamio_method(method):
def data_wrapped(user, *args, **kwargs):
user.pre_method()
value = method(user, *args, **kwargs)
user.aft_method()
return value
return data_wrapped
class User(ABC):
#// User Methods //////////////////////////////////////////////////////////
def __init__(self, username, *args, info={}, **kwargs):
self.username = username
self.info = info
super().__init__()
@abstractmethod
def save(self):
# save self._info to saved data
pass
@abstractmethod
def load(self):
# set self._info by accessing saved data
pass
@abstractmethod
def pre_method(self):
pass
@abstractmethod
def aft_method(self):
pass
@property
def info(self):
return self._info
@info.setter
def info(self, info):
self._info = info
#// Flamio Methods ////////////////////////////////////////////////////////
@flamio_method
def create_track(self, *args, **kwargs):
flamio.create_track(self.info, *args, **kwargs)
@flamio_method
def get_track(self, *args, **kwargs):
return flamio.get_track(self.info, *args, **kwargs)
@flamio_method
def delete_track(self, *args, **kwargs):
flamio.delete_track(self.info, *args, **kwargs)
@flamio_method
def create_tag(self, *args, **kwargs):
flamio.create_tag(self.info, *args, **kwargs)
@flamio_method
def delete_tag(self, *args, **kwargs):
flamio.delete_tag(self.info, *args, **kwargs)
@flamio_method
def get_tag(self, *args, **kwargs):
return flamio.get_tag(self.info, *args, **kwargs)
@flamio_method
def rename_tag(self, *args, **kwargs):
flamio.rename_tag(self.info, *args, **kwargs)
@flamio_method
def get_track_tags(self, *args, **kwargs):
return flamio.get_track_tags(self.info, *args, **kwargs)
@flamio_method
def add_tag_to_track(self, *args, **kwargs):
flamio.add_tag_to_track(self.info, *args, **kwargs)
@flamio_method
def remove_tag_from_track(self, *args, **kwargs):
flamio.remove_tag_from_track(self.info, *args, **kwargs)
@flamio_method
def get_mix_tags(self, *args, **kwargs):
return flamio.get_mix_tags(self.info, *args, **kwargs)
@flamio_method
def add_tag_to_mix(self, *args, **kwargs):
flamio.add_tag_to_mix(self.info, *args, **kwargs)
@flamio_method
def remove_tag_from_mix(self, *args, **kwargs):
flamio.remove_tag_from_mix(self.info, *args, **kwargs)
@flamio_method
def create_loop(self, *args, **kwargs):
flamio.create_loop(self.info, *args, **kwargs)
@flamio_method
def get_loop(self, *args, **kwargs):
return flamio.get_loop(self.info, *args, **kwargs)
@flamio_method
def delete_loop(self, *args, **kwargs):
flamio.delete_loop(self.info, *args, **kwargs)
@flamio_method
def add_loop_time(self, *args, **kwargs):
flamio.add_loop_time(self.info, *args, **kwargs)
@flamio_method
def get_loop_time(self, *args, **kwargs):
return flamio.get_loop_time(self.info, *args, **kwargs)
@flamio_method
def edit_loop_time(self, *args, **kwargs):
flamio.edit_loop_time(self.info, *args, **kwargs)
@flamio_method
def delete_loop_time(self, *args, **kwargs):
flamio.delete_loop_time(self.info, *args, **kwargs)
@flamio_method
def multidelete_loop_times(self, *args, **kwargs):
flamio.multidelete_loop_times(self.info, *args, **kwargs)
@flamio_method
def duplicate_loop_time(self, *args, **kwargs):
flamio.duplicate_loop_time(self.info, *args, **kwargs)
@flamio_method
def move_loop_time(self, *args, **kwargs):
flamio.move_loop_time(self.info, *args, **kwargs)
@flamio_method
def swap_loop_times(self, *args, **kwargs):
flamio.swap_loop_times(self.info, *args, **kwargs)
@flamio_method
def create_skip(self, *args, **kwargs):
flamio.create_skip(self.info, *args, **kwargs)
@flamio_method
def get_skip(self, *args, **kwargs):
return flamio.get_skip(self.info, *args, **kwargs)
@flamio_method
def delete_skip(self, *args, **kwargs):
flamio.delete_skip(self.info, *args, **kwargs)
@flamio_method
def add_skip_time(self, *args, **kwargs):
flamio.add_skip_time(self.info, *args, **kwargs)
@flamio_method
def get_skip_time(self, *args, **kwargs):
return flamio.get_skip_time(self.info, *args, **kwargs)
@flamio_method
def edit_skip_time(self, *args, **kwargs):
flamio.edit_skip_time(self.info, *args, **kwargs)
@flamio_method
def delete_skip_time(self, *args, **kwargs):
flamio.delete_skip_time(self.info, *args, **kwargs)
@flamio_method
def multidelete_skip_times(self, *args, **kwargs):
flamio.multidelete_skip_times(self.info, *args, **kwargs)
@flamio_method
def duplicate_skip_time(self, *args, **kwargs):
flamio.duplicate_skip_time(self.info, *args, **kwargs)
@flamio_method
def move_skip_time(self, *args, **kwargs):
flamio.move_skip_time(self.info, *args, **kwargs)
@flamio_method
def swap_skip_times(self, *args, **kwargs):
flamio.swap_skip_times(self.info, *args, **kwargs)
@flamio_method
def create_mix(self, *args, **kwargs):
flamio.create_mix(self.info, *args, **kwargs)
@flamio_method
def get_mix(self, *args, **kwargs):
return flamio.get_mix(self.info, *args, **kwargs)
@flamio_method
def delete_mix(self, *args, **kwargs):
flamio.delete_mix(self.info, *args, **kwargs)
@flamio_method
def add_mix_item(self, *args, **kwargs):
flamio.add_mix_item(self.info, *args, **kwargs)
@flamio_method
def get_mix_item(self, *args, **kwargs):
return flamio.get_mix_item(self.info, *args, **kwargs)
@flamio_method
def edit_mix_item(self, *args, **kwargs):
flamio.edit_mix_item(self.info, *args, **kwargs)
@flamio_method
def delete_mix_item(self, *args, **kwargs):
flamio.delete_mix_item(self.info, *args, **kwargs)
@flamio_method
def multidelete_mix_items(self, *args, **kwargs):
flamio.multidelete_mix_items(self.info, *args, **kwargs)
@flamio_method
def duplicate_mix_item(self, *args, **kwargs):
flamio.duplicate_mix_item(self.info, *args, **kwargs)
@flamio_method
def move_mix_item(self, *args, **kwargs):
flamio.move_mix_item(self.info, *args, **kwargs)
@flamio_method
def swap_mix_items(self, *args, **kwargs):
flamio.swap_mix_items(self.info, *args, **kwargs)
@flamio_method
def add_mix_track(self, *args, **kwargs):
flamio.add_mix_track(self.info, *args, **kwargs)
@flamio_method
def add_mix_pause(self, *args, **kwargs):
flamio.add_mix_pause(self.info, *args, **kwargs)
@flamio_method
def add_mix_mix(self, *args, **kwargs):
flamio.add_mix_mix(self.info, *args, **kwargs)
@flamio_method
def edit_mix_track(self, *args, **kwargs):
flamio.edit_mix_track(self.info, *args, **kwargs)
@flamio_method
def edit_mix_pause(self, *args, **kwargs):
flamio.edit_mix_pause(self.info, *args, **kwargs)
@flamio_method
def edit_mix_mix(self, *args, **kwargs):
flamio.edit_mix_mix(self.info, *args, **kwargs)
@flamio_method
def get_track_play_info(self, *args, **kwargs):
return flamio.get_track_play_info(self.info, *args, **kwargs)
@flamio_method
def get_item_play_info(self, *args, **kwargs):
return flamio.get_item_play_info(self.info, *args, **kwargs)
@flamio_method
def get_mix_play_info(self, *args, **kwargs):
return flamio.get_mix_play_info(self.info, *args, **kwargs)
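# A minimal concrete subclass sketch (hypothetical, not part of flamio):
# persists self.info as JSON and wires the pre/aft hooks so that every
# @flamio_method call loads fresh state first and saves it afterwards.
import json
import os.path
class LocalJSONUser(User):
    def _path(self):
        return '%s.json' % self.username
    def save(self):
        with open(self._path(), 'w') as f:
            json.dump(self.info, f)
    def load(self):
        if os.path.exists(self._path()):
            with open(self._path()) as f:
                self.info = json.load(f)
    def pre_method(self):
        self.load()
    def aft_method(self):
        self.save()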
| 29.996403 | 79 | 0.61794 | 1,050 | 8,339 | 4.644762 | 0.066667 | 0.229649 | 0.314948 | 0.202994 | 0.886406 | 0.816691 | 0.617183 | 0.532089 | 0.438384 | 0.055362 | 0 | 0.002042 | 0.236599 | 8,339 | 277 | 80 | 30.104693 | 0.764059 | 0.034896 | 0 | 0.319797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.324873 | false | 0.020305 | 0.010152 | 0.071066 | 0.42132 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6a7da3df60c7069ffd260bfd4d586aa5bd98e007 | 253 | py | Python | addons/hr/models/__init__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | ["MIT"] | null | null | null | addons/hr/models/__init__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | ["MIT"] | null | null | null | addons/hr/models/__init__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import hr
from . import res_config_settings
from . import mail_alias
from . import mail_channel
from . import res_partner
from . import res_users
| 25.3 | 74 | 0.758893 | 39 | 253 | 4.769231 | 0.666667 | 0.322581 | 0.209677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004762 | 0.16996 | 253 | 9 | 75 | 28.111111 | 0.880952 | 0.371542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6a7db2cbbd35ce5421bf9dc46c63d0d880dfea45 | 1,460 | py | Python | controllers/proc.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | 205 | 2015-01-20T08:26:09.000Z | 2022-03-27T19:59:33.000Z | controllers/proc.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | 249 | 2015-02-10T09:56:35.000Z | 2022-03-23T19:54:36.000Z | controllers/proc.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | 231 | 2015-02-10T09:33:17.000Z | 2022-02-18T19:56:05.000Z |
# -*- coding: utf-8 -*-
"""
Procurement
A module to handle Procurement
Currently handles
Suppliers
Planned Procurements
Purchase Orders (POs)
@ToDo: Extend to
Purchase Requests (PRs)
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(c)
# -----------------------------------------------------------------------------
def order():
""" RESTful CRUD controller """
return s3_rest_controller(rheader = s3db.proc_rheader,
hide_filter = True,
)
# -----------------------------------------------------------------------------
#def order_item():
# """ RESTful CRUD controller """
# return s3_rest_controller()
# -----------------------------------------------------------------------------
def plan():
""" RESTful CRUD controller """
return s3_rest_controller(rheader = s3db.proc_rheader,
hide_filter = True,
)
# -----------------------------------------------------------------------------
def supplier():
""" RESTful CRUD controller """
return s3_rest_controller("org", "organisation")
# END =========================================================================
| 26.545455 | 79 | 0.377397 | 102 | 1,460 | 5.254902 | 0.539216 | 0.08209 | 0.156716 | 0.201493 | 0.466418 | 0.466418 | 0.466418 | 0.30597 | 0.30597 | 0.30597 | 0 | 0.009786 | 0.230137 | 1,460 | 54 | 80 | 27.037037 | 0.467082 | 0.576712 | 0 | 0.285714 | 0 | 0 | 0.062385 | 0 | 0 | 0 | 0 | 0.018519 | 0 | 1 | 0.285714 | true | 0 | 0 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
6a91a4a394e0bfc87d46febc7702653bfa739570 | 180 | py | Python | class9/ex8/mytest/__init__.py | daveg999/Automation_class | d23652ecae56b790684971dda6e85a1d2367e22b | ["Apache-2.0"] | null | null | null | class9/ex8/mytest/__init__.py | daveg999/Automation_class | d23652ecae56b790684971dda6e85a1d2367e22b | ["Apache-2.0"] | null | null | null | class9/ex8/mytest/__init__.py | daveg999/Automation_class | d23652ecae56b790684971dda6e85a1d2367e22b | ["Apache-2.0"] | null | null | null |
from mytest.simple import func1
from mytest.whatever import func2
from mytest.world import func3
from mytest.world import MyClass
__all__ = ('func1', 'func2', 'func3', 'MyClass')
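# __all__ restricts what "from mytest import *" re-exports to exactly
# these four names.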
| 25.714286 | 48 | 0.772222 | 25 | 180 | 5.4 | 0.44 | 0.296296 | 0.222222 | 0.311111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038217 | 0.127778 | 180 | 6 | 49 | 30 | 0.821656 | 0 | 0 | 0 | 0 | 0 | 0.122222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.8 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
6aae89b56bc757f98c51d3d3665f0f64e58fb84a | 35 | py | Python | discord/types/widget.py | Harukomaze/disnake | 541f5c9623a02be894cd1015dbb344070700cb87 | ["MIT"] | null | null | null | discord/types/widget.py | Harukomaze/disnake | 541f5c9623a02be894cd1015dbb344070700cb87 | ["MIT"] | null | null | null | discord/types/widget.py | Harukomaze/disnake | 541f5c9623a02be894cd1015dbb344070700cb87 | ["MIT"] | null | null | null |
from disnake.types.widget import *
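# A compatibility shim: the star re-export above presumably keeps code
# importing the legacy "discord.types.widget" path working against the
# disnake fork's module of the same name.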
| 17.5 | 34 | 0.8 | 5 | 35 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0ad15c947cfe5253b488718e212935b7005da018 | 27,279 | py | Python | tests/components/zeroconf/test_init.py | jonasjeeliasson/core | 0301706fc631ad1f2cd2532667ba9dfe2f856198 | ["Apache-2.0"] | 1 | 2019-08-19T18:18:50.000Z | 2019-08-19T18:18:50.000Z | tests/components/zeroconf/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 70 | 2020-08-05T07:20:00.000Z | 2022-03-31T06:01:46.000Z | tests/components/zeroconf/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 2 | 2020-06-03T20:24:39.000Z | 2020-06-06T19:52:09.000Z |
"""Test Zeroconf component setup process."""
from unittest.mock import patch
from zeroconf import (
BadTypeInNameException,
Error as ZeroconfError,
InterfaceChoice,
IPVersion,
ServiceInfo,
ServiceStateChange,
)
from homeassistant.components import zeroconf
from homeassistant.components.zeroconf import CONF_DEFAULT_INTERFACE, CONF_IPV6
from homeassistant.const import (
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.generated import zeroconf as zc_gen
from homeassistant.setup import async_setup_component
NON_UTF8_VALUE = b"ABCDEF\x8a"
NON_ASCII_KEY = b"non-ascii-key\x8a"
PROPERTIES = {
b"macaddress": b"ABCDEF012345",
b"non-utf8-value": NON_UTF8_VALUE,
NON_ASCII_KEY: None,
}
HOMEKIT_STATUS_UNPAIRED = b"1"
HOMEKIT_STATUS_PAIRED = b"0"
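# In the HomeKit "sf" (status flags) TXT value, bit 0 set (b"1") means the
# accessory is still unpaired and should trigger discovery; b"0" means it
# is already paired.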
_ROUTE_NO_LOOPBACK = (
{
"attrs": [
("RTA_TABLE", 254),
("RTA_DST", "224.0.0.251"),
("RTA_OIF", 4),
("RTA_PREFSRC", "192.168.1.5"),
],
},
)
_ROUTE_LOOPBACK = (
{
"attrs": [
("RTA_TABLE", 254),
("RTA_DST", "224.0.0.251"),
("RTA_OIF", 4),
("RTA_PREFSRC", "127.0.0.1"),
],
},
)
def service_update_mock(zeroconf, services, handlers, *, limit_service=None):
"""Call service update handler."""
for service in services:
if limit_service is not None and service != limit_service:
continue
handlers[0](zeroconf, service, f"_name.{service}", ServiceStateChange.Added)
def get_service_info_mock(service_type, name):
"""Return service info for get_service_info."""
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties=PROPERTIES,
)
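# Hedged aside: the packed address b"\n\x00\x00\x14" used by these mocks
# is simply 10.0.0.20 (0x0a == "\n"), i.e. what
# socket.inet_aton("10.0.0.20") returns.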
def get_service_info_mock_without_an_address(service_type, name):
"""Return service info for get_service_info without any addresses."""
return ServiceInfo(
service_type,
name,
addresses=[],
port=80,
weight=0,
priority=0,
server="name.local.",
properties=PROPERTIES,
)
def get_homekit_info_mock(model, pairing_status):
"""Return homekit info for get_service_info for an homekit device."""
def mock_homekit_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"md": model.encode(), b"sf": pairing_status},
)
return mock_homekit_info
def get_zeroconf_info_mock(macaddress):
"""Return info for get_service_info for an zeroconf device."""
def mock_zc_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"macaddress": macaddress.encode()},
)
return mock_zc_info
def get_zeroconf_info_mock_manufacturer(manufacturer):
"""Return info for get_service_info for an zeroconf device."""
def mock_zc_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"manufacturer": manufacturer.encode()},
)
return mock_zc_info
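# Note the factory-returning-a-closure pattern in the get_*_info_mock
# helpers above: each call bakes scenario-specific TXT properties
# (macaddress, manufacturer, or HomeKit md/sf) into a fresh
# get_service_info stand-in.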
async def test_setup(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
expected_flow_calls = 0
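    # Only matchers that consist solely of a "domain" key fire for the
    # generic mocked services, so expect one discovery flow per unique
    # such domain.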
for matching_components in zc_gen.ZEROCONF.values():
domains = set()
for component in matching_components:
if len(component) == 1:
domains.add(component["domain"])
expected_flow_calls += len(domains)
assert len(mock_config_flow.mock_calls) == expected_flow_calls
# Test instance is set.
assert "zeroconf" in hass.data
assert await hass.components.zeroconf.async_get_instance() is mock_zeroconf
async def test_setup_with_overly_long_url_and_name(hass, mock_zeroconf, caplog):
"""Test we still setup with long urls and names."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.get_url",
return_value="https://this.url.is.way.too.long/very/deep/path/that/will/make/us/go/over/the/maximum/string/length/and/would/cause/zeroconf/to/fail/to/startup/because/the/key/and/value/can/only/be/255/bytes/and/this/string/is/a/bit/longer/than/the/maximum/length/that/we/allow/for/a/value",
), patch.object(
hass.config,
"location_name",
"\u00dcBER \u00dcber German Umlaut long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string",
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert "https://this.url.is.way.too.long" in caplog.text
assert "German Umlaut" in caplog.text
async def test_setup_with_default_interface(hass, mock_zeroconf):
"""Test default interface config."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_DEFAULT_INTERFACE: True}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.Default)
async def test_setup_without_default_interface(hass, mock_zeroconf):
"""Test without default interface config."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_DEFAULT_INTERFACE: False}}
)
assert mock_zeroconf.called_with()
async def test_setup_without_ipv6(hass, mock_zeroconf):
"""Test without ipv6."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_IPV6: False}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(ip_version=IPVersion.V4Only)
async def test_setup_with_ipv6(hass, mock_zeroconf):
"""Test without ipv6."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_IPV6: True}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with()
async def test_setup_with_ipv6_default(hass, mock_zeroconf):
"""Test without ipv6 as default."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with()
async def test_service_with_invalid_name(hass, mock_zeroconf, caplog):
"""Test we do not crash on service with an invalid name."""
with patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = BadTypeInNameException
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert "Failed to get info for device" in caplog.text
async def test_zeroconf_match_macaddress(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_http._tcp.local.",
"Shelly108._http._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{
"_http._tcp.local.": [
{"domain": "shelly", "name": "shelly*", "macaddress": "FFAADD*"}
]
},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_zeroconf_info_mock(
"FFAADDCC11DD"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "shelly"
async def test_zeroconf_match_manufacturer(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_airplay._tcp.local.",
"s1000._airplay._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_airplay._tcp.local.": [{"domain": "samsungtv", "manufacturer": "samsung*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = (
get_zeroconf_info_mock_manufacturer("Samsung Electronics")
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "samsungtv"
async def test_zeroconf_no_match(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_http._tcp.local.",
"somethingelse._http._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_http._tcp.local.": [{"domain": "shelly", "name": "shelly*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_zeroconf_info_mock(
"FFAADDCC11DD"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 0
async def test_zeroconf_no_match_manufacturer(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_airplay._tcp.local.",
"s1000._airplay._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_airplay._tcp.local.": [{"domain": "samsungtv", "manufacturer": "samsung*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = (
get_zeroconf_info_mock_manufacturer("Not Samsung Electronics")
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 0
async def test_homekit_match_partial_space(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"LIFX bulb", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "lifx"
async def test_homekit_match_partial_dash(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._udp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._udp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"Rachio-fa46ba", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "rachio"
async def test_homekit_match_full(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._udp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._udp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"BSB002", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "hue"
async def test_homekit_already_paired(hass, mock_zeroconf):
"""Test that an already paired device is sent to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"tado", HOMEKIT_STATUS_PAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
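    # A paired device is still matched by name ("tado") and additionally handed to homekit_controller.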
assert len(mock_config_flow.mock_calls) == 2
assert mock_config_flow.mock_calls[0][1][0] == "tado"
assert mock_config_flow.mock_calls[1][1][0] == "homekit_controller"
async def test_homekit_invalid_pairing_status(hass, mock_zeroconf):
    """Test that invalid pairing data is not sent to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"tado", b"invalid"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "tado"
async def test_homekit_not_paired(hass, mock_zeroconf):
"""Test that an not paired device is sent to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"this_will_not_match_any_integration", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "homekit_controller"
async def test_info_from_service_non_utf8(hass):
"""Test info_from_service handles non UTF-8 property keys and values correctly."""
service_type = "_test._tcp.local."
info = zeroconf.info_from_service(
get_service_info_mock(service_type, f"test.{service_type}")
)
raw_info = info["properties"].pop("_raw", False)
assert raw_info
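    # The non-ASCII key cannot be decoded, so the raw view has one fewer entry than PROPERTIES.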
assert len(raw_info) == len(PROPERTIES) - 1
assert NON_ASCII_KEY not in raw_info
assert len(info["properties"]) <= len(raw_info)
assert "non-utf8-value" not in info["properties"]
assert raw_info["non-utf8-value"] is NON_UTF8_VALUE
async def test_info_from_service_without_addresses(hass):
    """Test info_from_service returns None when the service has no addresses."""
service_type = "_test._tcp.local."
info = zeroconf.info_from_service(
get_service_info_mock_without_an_address(service_type, f"test.{service_type}")
)
assert info is None
async def test_get_instance(hass, mock_zeroconf):
"""Test we get an instance."""
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
assert await hass.components.zeroconf.async_get_instance() is mock_zeroconf
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert len(mock_zeroconf.ha_close.mock_calls) == 1
async def test_removed_ignored(hass, mock_zeroconf):
"""Test we remove it when a zeroconf entry is removed."""
mock_zeroconf.get_service_info.side_effect = ZeroconfError
def service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf, "_service.added", "name._service.added", ServiceStateChange.Added
)
handlers[0](
zeroconf,
"_service.updated",
"name._service.updated",
ServiceStateChange.Updated,
)
handlers[0](
zeroconf,
"_service.removed",
"name._service.removed",
ServiceStateChange.Removed,
)
with patch.object(zeroconf, "HaServiceBrowser", side_effect=service_update_mock):
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
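    # Only the Added and Updated events trigger a service-info lookup; the Removed event is ignored.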
assert len(mock_zeroconf.get_service_info.mock_calls) == 2
assert mock_zeroconf.get_service_info.mock_calls[0][1][0] == "_service.added"
assert mock_zeroconf.get_service_info.mock_calls[1][1][0] == "_service.updated"
async def test_async_detect_interfaces_setting_non_loopback_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns a non-loopback address."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route",
return_value=_ROUTE_NO_LOOPBACK,
):
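        # A non-loopback default route should let zeroconf bind the default interface.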
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
    mock_zeroconf.assert_called_with(interface_choice=InterfaceChoice.Default)
async def test_async_detect_interfaces_setting_loopback_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns a loopback address."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route", return_value=_ROUTE_LOOPBACK
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
    mock_zeroconf.assert_called_with(interface_choice=InterfaceChoice.All)
async def test_async_detect_interfaces_setting_empty_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns nothing."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch("homeassistant.components.zeroconf.IPRoute.route", return_value=[]):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
    mock_zeroconf.assert_called_with(interface_choice=InterfaceChoice.All)
async def test_async_detect_interfaces_setting_exception(hass, mock_zeroconf):
"""Test without default interface config and the route throws an exception."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route", side_effect=AttributeError
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
    mock_zeroconf.assert_called_with(interface_choice=InterfaceChoice.All)
# --- End of file. Next row: tests/python/unittest/test_gluon_probability_v1.py
# --- from PawelGlomski-Intel/incubator-mxnet @ 13e9d572b3059ebe0d1d4f6d452db4f971375588 (Apache-2.0 / MIT)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test gluon.probability with HybridBlock.hybrid_forward api
"""
import mxnet as mx
import numpy as _np
from mxnet import np, npx, autograd
from mxnet import gluon
import mxnet.gluon.probability as mgp
from mxnet.gluon.probability import StochasticBlock, StochasticSequential
from mxnet.gluon import HybridBlock
from mxnet.test_utils import use_np, assert_almost_equal
from numpy.testing import assert_array_equal
import pytest
import scipy.stats as ss
import scipy.special as scipy_special
import itertools
from numbers import Number
def prob_to_logit(prob):
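    # logit(p) = log(p / (1 - p)); log1p(-prob) keeps precision for prob near zero.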
return np.log(prob) - np.log1p(-prob)
def _distribution_method_invoker(dist, func, *args):
"""Wrapper for invoking different types of class methods with one unified
interface.
Parameters
----------
dist : Distribution
func : method
"""
    if len(args) == 0:
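        # With no extra arguments the attribute may be a property (e.g. mean) or a nullary method.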
out = getattr(dist, func)
if callable(out):
return out()
else:
return out
return getattr(dist, func)(*args)
def test_mgp_getF_v1():
# Test getF
getF = mgp.utils.getF
nd = mx.nd
sym = mx.sym
assert getF(nd.ones((2, 2)), nd.ones((2, 2))) == nd
assert getF(sym.ones((2, 2)), sym.ones((2, 2))) == sym
assert getF(1.0, 2.0) == nd
# Test exception
    with pytest.raises(TypeError):
        getF(nd.ones((2, 2)), sym.ones((2, 2)))
    # The first call raises, so the reversed-order check needs its own block.
    with pytest.raises(TypeError):
        getF(sym.ones((2, 2)), nd.ones((2, 2)))
@use_np
def test_gluon_uniform_v1():
class TestUniform(HybridBlock):
def __init__(self, func):
super(TestUniform, self).__init__()
self._func = func
def hybrid_forward(self, F, low, high, *args):
uniform = mgp.Uniform(low, high, validate_args=True)
return _distribution_method_invoker(uniform, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
low = np.random.uniform(-1, 1, shape)
high = low + np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(low, high)
net = TestUniform("log_prob")
if hybridize:
net.hybridize()
for i in range(2):
mx_out = net(low, high, samples).asnumpy()
np_out = ss.uniform(low.asnumpy(),
(high - low).asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
low = np.random.uniform(-1, 1, shape)
high = low + np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(low, high)
net = TestUniform("cdf")
if hybridize:
net.hybridize()
mx_out = net(low, high, samples).asnumpy()
np_out = ss.uniform(low.asnumpy(),
(high - low).asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
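    # icdf (scipy's ppf) consumes quantiles, so samples are drawn from [0, 1).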
for shape, hybridize in itertools.product(shapes, [True, False]):
low = np.random.uniform(-1, 1, shape)
high = low + np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestUniform("icdf")
if hybridize:
net.hybridize()
mx_out = net(low, high, samples).asnumpy()
np_out = ss.uniform(low.asnumpy(),
(high - low).asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
low = np.random.uniform(-1, 1, shape)
high = low + np.random.uniform(0.5, 1.5, shape)
net = TestUniform("entropy")
if hybridize:
net.hybridize()
mx_out = net(low, high).asnumpy()
np_out = ss.uniform(low.asnumpy(),
(high - low).asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_normal_v1():
class TestNormal(HybridBlock):
def __init__(self, func):
super(TestNormal, self).__init__()
self._func = func
def hybrid_forward(self, F, loc, scale, *args):
normal = mgp.Normal(loc, scale, validate_args=True)
return _distribution_method_invoker(normal, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestNormal("log_prob")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.norm(loc.asnumpy(),
scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestNormal("cdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.norm(loc.asnumpy(),
scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestNormal("icdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.norm(loc.asnumpy(),
scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
net = TestNormal("entropy")
if hybridize:
net.hybridize()
mx_out = net(loc, scale).asnumpy()
np_out = ss.norm(loc.asnumpy(),
scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_laplace_v1():
class TestLaplace(HybridBlock):
def __init__(self, func):
super(TestLaplace, self).__init__()
self._func = func
def hybrid_forward(self, F, loc, scale, *args):
laplace = mgp.Laplace(loc, scale, validate_args=True)
return _distribution_method_invoker(laplace, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.laplace(size=shape)
net = TestLaplace("log_prob")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.laplace(loc.asnumpy(),
scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.laplace(size=shape)
net = TestLaplace("cdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.laplace(loc.asnumpy(),
scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestLaplace("icdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.laplace(loc.asnumpy(),
scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
net = TestLaplace("entropy")
if hybridize:
net.hybridize()
mx_out = net(loc, scale).asnumpy()
np_out = ss.laplace(loc.asnumpy(),
scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_cauchy_v1():
class TestCauchy(HybridBlock):
def __init__(self, func):
self._func = func
super(TestCauchy, self).__init__()
def hybrid_forward(self, F, loc, scale, *args):
cauchy = mgp.Cauchy(loc, scale, F, validate_args=True)
return _distribution_method_invoker(cauchy, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestCauchy("sample")
if hybridize:
net.hybridize()
mx_out = net(loc, scale)
desired_shape = (shape,) if isinstance(shape, Number) else shape
assert mx_out.shape == desired_shape
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestCauchy("log_prob")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.cauchy(loc.asnumpy(),
scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestCauchy("cdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.cauchy(loc.asnumpy(),
scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape, low=1e-4, high=1.0-1e-4)
net = TestCauchy("icdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.cauchy(loc.asnumpy(),
scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
net = TestCauchy("entropy")
if hybridize:
net.hybridize()
mx_out = net(loc, scale).asnumpy()
np_out = ss.cauchy(loc.asnumpy(),
scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_half_cauchy_v1():
class TestHalfCauchy(HybridBlock):
def __init__(self, func):
super(TestHalfCauchy, self).__init__()
self._func = func
def hybrid_forward(self, F, scale, *args):
            half_cauchy = mgp.HalfCauchy(scale, F, validate_args=True)
            return getattr(half_cauchy, self._func)(*args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
net = TestHalfCauchy("sample")
if hybridize:
net.hybridize()
mx_out = net(scale).asnumpy()
if isinstance(shape, Number):
shape = (shape,)
assert mx_out.shape == shape
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfCauchy("log_prob")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfcauchy(0, scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfCauchy("cdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfcauchy(0, scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape, high=1.0-1e-4)
net = TestHalfCauchy("icdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfcauchy(0, scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_poisson_v1():
class TestPoisson(HybridBlock):
def __init__(self, func):
self._func = func
super(TestPoisson, self).__init__()
def hybrid_forward(self, F, rate, *args):
poisson = mgp.Poisson(rate, F, validate_args=True)
return _distribution_method_invoker(poisson, self._func, *args)
shapes = [(1,), (2, 3), 6]
# Test sampling
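    # Only the imperative path is exercised; Poisson sampling presumably cannot be hybridized here.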
for shape, hybridize in itertools.product(shapes, [False]):
rate = np.random.uniform(0.5, 1.5, shape)
net = TestPoisson("sample")
if hybridize:
net.hybridize()
mx_out = net(rate).asnumpy()
assert mx_out.shape == rate.shape
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
rate = np.random.uniform(0.5, 1.5, shape)
samples = np.random.randint(0, 5, shape).astype('float')
net = TestPoisson("log_prob")
if hybridize:
net.hybridize()
mx_out = net(rate, samples).asnumpy()
np_out = ss.poisson(mu=rate.asnumpy()).logpmf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_geometric_v1():
class TestGeometric(HybridBlock):
def __init__(self, func, is_logit=False):
super(TestGeometric, self).__init__()
self._is_logit = is_logit
self._func = func
def hybrid_forward(self, F, params, *args):
dist = mgp.Geometric(logit=params, validate_args=True) if self._is_logit else \
mgp.Geometric(prob=params, validate_args=True)
return _distribution_method_invoker(dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
sample = np.random.randint(0, 10, size=shape).astype('float32')
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestGeometric("log_prob", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param, sample).asnumpy()
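        # scipy's geom counts trials (support {1, 2, ...}) while mgp.Geometric counts failures
        # (support {0, 1, ...}), hence the +1 shift below.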
np_out = ss.geom.logpmf(sample.asnumpy() + 1, prob.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test variance
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestGeometric("variance", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
np_out = ss.geom(prob.asnumpy()).var()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
# Add lower bound constraint, otherwise scipy would raise warning.
prob = np.random.uniform(low=0.1, size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestGeometric("entropy", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
np_out = ss.geom(prob.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_negative_binomial_v1():
class TestNegativeBinomial(HybridBlock):
def __init__(self, func, is_logit=False):
super(TestNegativeBinomial, self).__init__()
self._is_logit = is_logit
self._func = func
def hybrid_forward(self, F, n, params, *args):
dist = mgp.NegativeBinomial(n=n, logit=params, validate_args=True) if self._is_logit else \
mgp.NegativeBinomial(n=n, prob=params, validate_args=True)
return _distribution_method_invoker(dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
n = np.random.randint(1, 10, size=shape).astype('float32')
prob = np.random.uniform(low=0.2, high=0.6, size=shape).astype('float32')
sample = np.random.randint(0, 10, size=shape).astype('float32')
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestNegativeBinomial("log_prob", use_logit)
if hybridize:
net.hybridize()
mx_out = net(n, param, sample).asnumpy()
np_out = ss.nbinom(n=n.asnumpy(), p=prob.asnumpy()
).logpmf(sample.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test mean and variance
for shape, hybridize in itertools.product(shapes, [True, False]):
for func in ['mean', 'variance']:
for use_logit in [True, False]:
n = np.random.randint(1, 10, size=shape).astype('float32')
prob = np.random.uniform(low=0.2, high=0.6, size=shape).astype('float32')
net = TestNegativeBinomial(func, use_logit)
param = prob
if use_logit:
param = prob_to_logit(param)
if hybridize:
net.hybridize()
mx_out = net(n, param).asnumpy()
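                # mgp and scipy parameterize the per-trial probability oppositely here, hence p=1 - prob.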
ss_nbinom = ss.nbinom(n=n.asnumpy(), p=1 - prob.asnumpy())
if func == 'mean':
np_out = ss_nbinom.mean()
else:
np_out = ss_nbinom.var()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_exponential_v1():
class TestExponential(HybridBlock):
def __init__(self, func):
self._func = func
super(TestExponential, self).__init__()
def hybrid_forward(self, F, scale, *args):
exponential = mgp.Exponential(scale, F, validate_args=True)
return _distribution_method_invoker(exponential, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(0.2, 1.2, size=shape)
net = TestExponential("log_prob")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.expon(scale=scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(0.2, 1.2, size=shape)
net = TestExponential("cdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.expon(scale=scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(0.0, 1.0, size=shape)
net = TestExponential("icdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.expon(scale=scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
net = TestExponential("entropy")
if hybridize:
net.hybridize()
mx_out = net(scale).asnumpy()
np_out = ss.expon(scale=scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_weibull_v1():
class TestWeibull(HybridBlock):
def __init__(self, func):
super(TestWeibull, self).__init__()
self._func = func
def hybrid_forward(self, F, concentration, scale, *args):
weibull = mgp.Weibull(concentration, scale, F, validate_args=True)
return _distribution_method_invoker(weibull, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
concentration = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
samples = np.random.uniform(size=shape)
net = TestWeibull("log_prob")
if hybridize:
net.hybridize()
mx_out = net(concentration, scale, samples).asnumpy()
np_out = ss.weibull_min(c=concentration.asnumpy(
), scale=scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
concentration = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
samples = np.random.uniform(size=shape)
net = TestWeibull("cdf")
if hybridize:
net.hybridize()
mx_out = net(concentration, scale, samples).asnumpy()
np_out = ss.weibull_min(c=concentration.asnumpy(
), scale=scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
concentration = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
samples = np.random.uniform(size=shape)
net = TestWeibull("icdf")
if hybridize:
net.hybridize()
mx_out = net(concentration, scale, samples).asnumpy()
np_out = ss.weibull_min(c=concentration.asnumpy(
), scale=scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
concentration = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
net = TestWeibull("entropy")
if hybridize:
net.hybridize()
mx_out = net(concentration, scale).asnumpy()
np_out = ss.weibull_min(c=concentration.asnumpy(),
scale=scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_pareto_v1():
class TestPareto(HybridBlock):
def __init__(self, func):
super(TestPareto, self).__init__()
self._func = func
def hybrid_forward(self, F, alpha, scale, *args):
pareto = mgp.Pareto(alpha, scale, F, validate_args=True)
return _distribution_method_invoker(pareto, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
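        # Pareto support is [scale, inf); with scale < 1, samples in [1, 2) are always in-support.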
samples = np.random.uniform(1, 2, size=shape)
net = TestPareto("log_prob")
if hybridize:
net.hybridize()
mx_out = net(alpha, scale, samples).asnumpy()
np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).logpdf(
samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
samples = np.random.uniform(1.0, 2.0, size=shape)
net = TestPareto("cdf")
if hybridize:
net.hybridize()
mx_out = net(alpha, scale, samples).asnumpy()
np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).cdf(
samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
samples = np.random.uniform(size=shape)
net = TestPareto("icdf")
if hybridize:
net.hybridize()
mx_out = net(alpha, scale, samples).asnumpy()
np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).ppf(
samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(size=shape)
scale = np.random.uniform(size=shape)
net = TestPareto("entropy")
if hybridize:
net.hybridize()
mx_out = net(alpha, scale).asnumpy()
np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_gamma_v1():
class TestGamma(HybridBlock):
def __init__(self, func):
super(TestGamma, self).__init__()
self._func = func
def hybrid_forward(self, F, shape, scale, *args):
gamma = mgp.Gamma(shape, scale, F, validate_args=True)
return _distribution_method_invoker(gamma, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(0.5, 1.5, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestGamma("log_prob")
if hybridize:
net.hybridize()
mx_out = net(alpha, scale, samples).asnumpy()
np_out = ss.gamma(a=alpha.asnumpy(), loc=0,
scale=scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test `mean`, `var` and `entropy`
for shape, hybridize in itertools.product(shapes, [True, False]):
for func in ['mean', 'variance', 'entropy']:
alpha = np.random.uniform(0.5, 1.5, shape)
scale = np.random.uniform(0.5, 1.5, shape)
net = TestGamma(func)
if hybridize:
net.hybridize()
mx_out = net(alpha, scale).asnumpy()
ss_gamma = ss.gamma(a=alpha.asnumpy(), loc=0,
scale=scale.asnumpy())
if func == 'mean':
np_out = ss_gamma.mean()
elif func == 'variance':
np_out = ss_gamma.var()
else:
np_out = ss_gamma.entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_dirichlet_v1():
class TestDirichlet(HybridBlock):
def __init__(self, func):
super(TestDirichlet, self).__init__()
self._func = func
def hybrid_forward(self, F, alpha, *args):
dirichlet = mgp.Dirichlet(alpha, F, validate_args=True)
return _distribution_method_invoker(dirichlet, self._func, *args)
event_shapes = [2, 4, 6]
batch_shapes = [None, (2, 3)]
# Test sampling
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for hybridize in [True, False]:
desired_shape = (
batch_shape if batch_shape is not None else ()) + (event_shape,)
alpha = np.random.uniform(1.0, 5.0, size=desired_shape)
net = TestDirichlet("sample")
if hybridize:
net.hybridize()
mx_out = net(alpha).asnumpy()
# Check shape
assert mx_out.shape == desired_shape
# Check simplex
assert_almost_equal(mx_out.sum(-1), _np.ones_like(mx_out.sum(-1)), atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test log_prob
# Scipy does not support batch `alpha`, thus we skip multi-dimensional batch_shape case.
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]):
for hybridize in [True, False]:
desired_shape = (
batch_shape if batch_shape is not None else ()) + (event_shape,)
alpha = np.random.uniform(1.0, 5.0, desired_shape)
np_samples = _np.random.dirichlet(
[10.0 / event_shape] * event_shape, size=batch_shape)
net = TestDirichlet("log_prob")
if hybridize:
net.hybridize()
mx_out = net(alpha, np.array(np_samples)).asnumpy()
np_out = ss.dirichlet(alpha=alpha.asnumpy()).logpdf(np_samples)
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test `mean`, `var` and `entropy`
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]):
for hybridize in [False]:
for func in ['mean', 'variance', 'entropy']:
desired_shape = (
batch_shape if batch_shape is not None else ()) + (event_shape,)
alpha = np.random.uniform(1.0, 5.0, desired_shape)
net = TestDirichlet(func)
if hybridize:
net.hybridize()
mx_out = net(alpha).asnumpy()
ss_dir = ss.dirichlet(alpha=alpha.asnumpy())
if func == 'mean':
np_out = ss_dir.mean()
elif func == 'variance':
np_out = ss_dir.var()
else:
np_out = ss_dir.entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_beta_v1():
class TestBeta(HybridBlock):
def __init__(self, func):
super(TestBeta, self).__init__()
self._func = func
def hybrid_forward(self, F, alpha, beta, *args):
beta_dist = mgp.Beta(alpha, beta, F, validate_args=True)
return _distribution_method_invoker(beta_dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
alpha = np.random.uniform(0.5, 1.5, shape)
beta = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestBeta("log_prob")
if hybridize:
net.hybridize()
mx_out = net(alpha, beta, samples).asnumpy()
np_out = ss.beta(alpha.asnumpy(), beta.asnumpy()
).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test `mean`, `var` and `entropy`
for shape, hybridize in itertools.product(shapes, [True, False]):
for func in ['mean', 'variance', 'entropy']:
alpha = np.random.uniform(0.5, 1.5, shape)
beta = np.random.uniform(0.5, 1.5, shape)
net = TestBeta(func)
if hybridize:
net.hybridize()
mx_out = net(alpha, beta).asnumpy()
ss_beta = ss.beta(alpha.asnumpy(), beta.asnumpy())
if func == 'mean':
np_out = ss_beta.mean()
elif func == 'variance':
np_out = ss_beta.var()
else:
np_out = ss_beta.entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_fisher_snedecor_v1():
class TestFisherSnedecor(HybridBlock):
def __init__(self, func):
super(TestFisherSnedecor, self).__init__()
self._func = func
def hybrid_forward(self, F, df1, df2, *args):
            f_dist = mgp.FisherSnedecor(df1, df2, F, validate_args=True)
            return _distribution_method_invoker(f_dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
df1 = np.random.uniform(0.5, 1.5, shape)
df2 = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestFisherSnedecor("log_prob")
if hybridize:
net.hybridize()
mx_out = net(df1, df2, samples).asnumpy()
np_out = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy()
).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test `mean` and `var`
for shape, hybridize in itertools.product(shapes, [True, False]):
for func in ['mean', 'variance']:
df1 = np.random.uniform(0.5, 1.5, shape)
df2 = np.random.uniform(4.0, 6.0, shape)
net = TestFisherSnedecor(func)
if hybridize:
net.hybridize()
mx_out = net(df1, df2).asnumpy()
ss_f = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy())
if func == 'mean':
np_out = ss_f.mean()
else:
np_out = ss_f.var()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_student_t_v1():
class TestT(HybridBlock):
def __init__(self, func):
super(TestT, self).__init__()
self._func = func
def hybrid_forward(self, F, df, loc, scale, *args):
t_dist = mgp.StudentT(df, loc, scale, F, validate_args=True)
return _distribution_method_invoker(t_dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.zeros(shape)
scale = np.random.uniform(0.5, 1.5, shape)
df = np.random.uniform(2, 4, shape)
samples = np.random.uniform(0, 4, size=shape)
net = TestT("log_prob")
if hybridize:
net.hybridize()
mx_out = net(df, loc, scale, samples).asnumpy()
np_out = ss.t(loc=0, scale=scale.asnumpy(),
df=df.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test `mean`, `var` and `entropy`
for shape, hybridize in itertools.product(shapes, [False, True]):
for func in ['mean', 'variance', 'entropy']:
loc = np.zeros(shape)
scale = np.random.uniform(0.5, 1.5, shape)
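            # df > 2 keeps the variance finite; drawing df from (3, 4) stays safely above that bound.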
df = np.random.uniform(3, 4, shape)
net = TestT(func)
if hybridize:
net.hybridize()
mx_out = net(df, loc, scale).asnumpy()
ss_f = ss.t(loc=0, scale=scale.asnumpy(), df=df.asnumpy())
if func == 'mean':
np_out = ss_f.mean()
elif func == 'variance':
np_out = ss_f.var()
else:
np_out = ss_f.entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_gumbel_v1():
class TestGumbel(HybridBlock):
def __init__(self, func):
super(TestGumbel, self).__init__()
self._func = func
def hybrid_forward(self, F, loc, scale, *args):
            gumbel = mgp.Gumbel(loc, scale, F, validate_args=True)
            return getattr(gumbel, self._func)(*args)
shapes = [(), (1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestGumbel("log_prob")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.gumbel_r(loc=loc.asnumpy(),
scale=scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.normal(size=shape)
net = TestGumbel("cdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.gumbel_r(loc.asnumpy(),
scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestGumbel("icdf")
if hybridize:
net.hybridize()
mx_out = net(loc, scale, samples).asnumpy()
np_out = ss.gumbel_r(loc.asnumpy(),
scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
scale = np.random.uniform(0.5, 1.5, shape)
net = TestGumbel("entropy")
if hybridize:
net.hybridize()
mx_out = net(loc, scale).asnumpy()
np_out = ss.gumbel_r(loc.asnumpy(),
scale.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_multinomial_v1():
class TestMultinomial(HybridBlock):
def __init__(self, func, num_events, total_count, is_logit, batch_shape=None, sample_shape=None):
super(TestMultinomial, self).__init__()
self._num_events = num_events
self._total_count = total_count
self._is_logit = is_logit
self._func = func
self._batch_shape = batch_shape
self._sample_shape = sample_shape
def hybrid_forward(self, F, params, *args):
multinomial = (
mgp.Multinomial(self._num_events, logit=params, total_count=self._total_count,
validate_args=True)
if self._is_logit else
mgp.Multinomial(self._num_events, prob=params, total_count=self._total_count,
validate_args=True)
)
if self._func == 'sample':
return multinomial.sample(self._batch_shape)
if self._func == 'sample_n':
return multinomial.sample_n(self._sample_shape)
return _distribution_method_invoker(multinomial, self._func, *args)
def one_hot(a, num_classes):
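        # Rows of the identity matrix are one-hot vectors; fancy indexing maps class ids onto them.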
return np.identity(num_classes)[a]
event_shapes = [2, 5, 10]
batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
sample_shapes = [None, (2,), (3, 4)]
# Test sampling
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob
if use_logit:
param = np.log(param)
net = TestMultinomial("sample", event_shape, _np.random.randint(1, 5),
use_logit, batch_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = batch_shape if batch_shape is not None else ()
assert mx_out.shape == desired_shape + (event_shape,)
# Test sample_n
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob
if use_logit:
param = np.log(param)
net = TestMultinomial("sample_n", event_shape, _np.random.randint(1, 5),
use_logit, batch_shape, sample_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
sample_shape = () if sample_shape is None else sample_shape
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
assert mx_out.shape == desired_shape + (event_shape,)
# Test log_prob
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
eps = _np.finfo('float32').eps
prob = np.clip(prob, eps, 1 - eps)
param = prob
sample_shape = () if sample_shape is None else sample_shape
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
samples = np.random.choice(event_shape, size=desired_shape)
samples = one_hot(samples, event_shape)
if use_logit:
param = np.log(param)
net = TestMultinomial("log_prob", event_shape,
_np.random.randint(1, 5), use_logit)
if hybridize:
net.hybridize()
mx_out = net(param, samples).asnumpy()
# Check shape
assert mx_out.shape == desired_shape
@use_np
def test_gluon_binomial_v1():
class TestBinomial(HybridBlock):
def __init__(self, func, is_logit=False, n=1):
super(TestBinomial, self).__init__()
self._is_logit = is_logit
self._func = func
self._n = n
def hybrid_forward(self, F, params, *args):
dist = mgp.Binomial(n=self._n, logit=params, validate_args=True) \
if self._is_logit else \
mgp.Binomial(n=self._n, prob=params, validate_args=True)
return _distribution_method_invoker(dist, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
for use_logit in [True, False]:
n = _np.random.randint(5, 10)
prob = np.random.uniform(low=0.1, size=shape)
net = TestBinomial('sample', use_logit, n=float(n))
param = prob
if use_logit:
param = prob_to_logit(param)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = (shape,) if isinstance(shape, int) else shape
assert mx_out.shape == desired_shape
# Test sample_n
prefix_shape = (2, 3)
for shape in shapes:
n = _np.random.randint(5, 10)
prob = np.random.uniform(low=0.1, size=shape)
dist = mgp.Binomial(n=n, prob=prob)
samples = dist.sample_n(prefix_shape)
assert samples.shape == (prefix_shape + prob.shape)
# Test log_prob
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
n = _np.random.randint(5, 10)
prob = np.random.uniform(low=0.1, size=shape)
sample = np.random.randint(0, n, size=shape).astype('float32')
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestBinomial("log_prob", use_logit, n=float(n))
if hybridize:
net.hybridize()
mx_out = net(param, sample).asnumpy()
np_out = ss.binom(n=n, p=prob.asnumpy()).logpmf(sample.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test mean and variance
for shape, hybridize in itertools.product(shapes, [True, False]):
for func in ['mean', 'variance']:
for use_logit in [True, False]:
n = _np.random.randint(5, 10)
prob = np.random.uniform(low=0.1, size=shape)
net = TestBinomial(func, use_logit, n=float(n))
param = prob
if use_logit:
param = prob_to_logit(param)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
ss_binom = ss.binom(n=n, p=prob.asnumpy())
if func == 'mean':
np_out = ss_binom.mean()
else:
np_out = ss_binom.var()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
@pytest.mark.flaky
def test_gluon_bernoulli_v1():
class TestBernoulli(HybridBlock):
def __init__(self, func, is_logit=False):
super(TestBernoulli, self).__init__()
self._is_logit = is_logit
self._func = func
def hybrid_forward(self, F, params, *args):
bernoulli = mgp.Bernoulli(logit=params, validate_args=True) if self._is_logit else \
mgp.Bernoulli(prob=params, validate_args=True)
return _distribution_method_invoker(bernoulli, self._func, *args)
# Test log_prob
shapes = [(), (1,), (2, 3), 6]
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
sample = npx.random.bernoulli(prob=0.5, size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestBernoulli("log_prob", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param, sample).asnumpy()
np_out = _np.log(ss.bernoulli.pmf(sample.asnumpy(), prob.asnumpy()))
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test variance
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
sample = npx.random.bernoulli(prob=0.5, size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestBernoulli("variance", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
np_out = ss.bernoulli(prob.asnumpy()).var()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
sample = npx.random.bernoulli(prob=0.5, size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestBernoulli("entropy", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
np_out = ss.bernoulli(prob.asnumpy()).entropy()
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
@use_np
def test_relaxed_bernoulli_v1():
class TestRelaxedBernoulli(HybridBlock):
def __init__(self, func, is_logit=False):
super(TestRelaxedBernoulli, self).__init__()
self._is_logit = is_logit
self._func = func
def hybrid_forward(self, F, params, *args):
relaxed_bernoulli = mgp.RelaxedBernoulli(T=1.0, logit=params, validate_args=True)\
if self._is_logit else \
mgp.RelaxedBernoulli(T=1.0, prob=params, validate_args=True)
if self._func == "sample":
return relaxed_bernoulli.sample()
return _distribution_method_invoker(relaxed_bernoulli, self._func, *args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
param.attach_grad()
net = TestRelaxedBernoulli("sample", use_logit)
if hybridize:
net.hybridize()
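        # The relaxed (concrete) sample is differentiable in the parameter, so gradients should flow back to it.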
with autograd.record():
mx_out = net(param)
mx_out.backward()
desired_shape = (shape,) if isinstance(shape, int) else shape
assert param.grad.shape == desired_shape
for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
prob = np.random.uniform(size=shape)
sample = np.random.uniform(0.1, 0.9, size=shape)
param = prob
if use_logit:
param = prob_to_logit(param)
net = TestRelaxedBernoulli("log_prob", use_logit)
if hybridize:
net.hybridize()
mx_out = net(param, sample).asnumpy()
desired_shape = (shape,) if isinstance(shape, int) else shape
assert mx_out.shape == desired_shape
@use_np
def test_gluon_categorical_v1():
class TestCategorical(HybridBlock):
def __init__(self, func, is_logit=False, batch_shape=None, num_events=None, sample_shape=None):
super(TestCategorical, self).__init__()
self._is_logit = is_logit
self._func = func
self._batch_shape = batch_shape
self._num_events = num_events
self._sample_shape = sample_shape
def hybrid_forward(self, F, params, *args):
categorical = mgp.Categorical(self._num_events, logit=params, validate_args=True)\
if self._is_logit else \
mgp.Categorical(self._num_events, prob=params,
validate_args=True)
if self._func == "sample":
return categorical.sample(self._batch_shape)
if self._func == "sample_n":
return categorical.sample_n(self._sample_shape)
return _distribution_method_invoker(categorical, self._func, *args)
event_shapes = [2, 5, 10]
batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
sample_shapes = [(), (2,), (3, 4)]
# Test sampling
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob.astype('float32')
if use_logit:
param = np.log(param)
net = TestCategorical("sample", use_logit,
batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = batch_shape if batch_shape is not None else ()
assert mx_out.shape == desired_shape
# Test sample_n
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob.astype('float32')
if use_logit:
param = np.log(param)
net = TestCategorical("sample_n",
is_logit=use_logit, batch_shape=batch_shape,
num_events=event_shape, sample_shape=sample_shape
)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
assert mx_out.shape == desired_shape
# Test log_prob
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
eps = _np.finfo('float32').eps
prob = np.clip(prob, eps, 1 - eps)
param = prob.astype('float32')
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
samples = np.random.choice(event_shape, size=desired_shape)
if use_logit:
param = np.log(param)
net = TestCategorical("log_prob", use_logit,
batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param, samples)
# Check shape
assert mx_out.shape == desired_shape
# Check value
log_pmf, indices = np.broadcast_arrays(
np.log(prob), np.expand_dims(samples, -1))
if indices.ndim >= 1:
indices = indices[..., :1]
expect_log_prob = _np.take_along_axis(
log_pmf, indices.astype('int'), axis=-1).asnumpy()
assert_almost_equal(mx_out.asnumpy(), expect_log_prob.squeeze(), atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test enumerate_support
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob.astype('float32')
if use_logit:
param = np.log(param)
net = TestCategorical("enumerate_support",
use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = (event_shape,) + \
(batch_shape if batch_shape is not None else ())
assert mx_out.shape == desired_shape
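# Hedged reference computation mirroring the log_prob check above: for a
# categorical with probabilities `prob`, log_prob(k) is log(prob)[..., k],
# which numpy expresses with take_along_axis. Names are illustrative only.
import numpy as _onp
_prob = _onp.array([0.2, 0.5, 0.3])
_samples = _onp.array([0, 2, 1])
_log_prob = _onp.take_along_axis(
    _onp.broadcast_to(_onp.log(_prob), _samples.shape + _prob.shape),
    _samples[..., None], axis=-1).squeeze(-1)
assert _onp.allclose(_log_prob, _onp.log(_prob)[_samples])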
@use_np
def test_gluon_one_hot_categorical_v1():
def one_hot(a, num_classes):
return np.identity(num_classes)[a]
class TestOneHotCategorical(HybridBlock):
def __init__(self, func, is_logit=False, batch_shape=None, num_events=None):
super(TestOneHotCategorical, self).__init__()
self._is_logit = is_logit
self._func = func
self._batch_shape = batch_shape
self._num_events = num_events
def hybrid_forward(self, F, params, *args):
categorical = mgp.OneHotCategorical(num_events=self._num_events, logit=params) \
if self._is_logit else \
mgp.OneHotCategorical(num_events=self._num_events, prob=params)
if self._func == "sample":
return categorical.sample(self._batch_shape)
return _distribution_method_invoker(categorical, self._func, *args)
event_shapes = [2, 5, 10]
batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
sample_shapes = [(), (2,), (3, 4)]
# Test sampling
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob
if use_logit:
param = np.log(param)
net = TestOneHotCategorical(
"sample", use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = batch_shape if batch_shape is not None else ()
assert mx_out.shape == desired_shape + (event_shape,)
# Test log_prob
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
eps = _np.finfo('float32').eps
prob = np.clip(prob, eps, 1 - eps)
param = prob
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
samples = np.random.choice(event_shape, size=desired_shape)
samples = one_hot(samples, event_shape)
if use_logit:
param = np.log(param)
net = TestOneHotCategorical(
"log_prob", use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param, samples)
# Check shape
assert mx_out.shape == desired_shape
# Test enumerate support
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
param = prob
if use_logit:
param = np.log(param)
net = TestOneHotCategorical(
"enumerate_support", use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param).asnumpy()
desired_shape = batch_shape if batch_shape is not None else ()
assert mx_out.shape == (event_shape,) + \
desired_shape + (event_shape,)
@use_np
def test_relaxed_one_hot_categorical_v1():
class TestRelaxedOneHotCategorical(HybridBlock):
def __init__(self, func, is_logit=False, batch_shape=None, num_events=None):
super(TestRelaxedOneHotCategorical, self).__init__()
self._is_logit = is_logit
self._func = func
self._batch_shape = batch_shape
self._num_events = num_events
def hybrid_forward(self, F, params, *args):
categorical = mgp.RelaxedOneHotCategorical(T=1.0, num_events=self._num_events, logit=params) \
if self._is_logit else \
mgp.RelaxedOneHotCategorical(
T=1.0, num_events=self._num_events, prob=params)
if self._func == "sample":
return categorical.sample(self._batch_shape)
return _distribution_method_invoker(categorical, self._func, *args)
event_shapes = [2, 5, 10]
batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
sample_shapes = [(), (2,), (3, 4)]
# Test sampling
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
for use_logit, hybridize in itertools.product([True, False], [True, False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
prob = prob.astype('float32')
param = prob
if use_logit:
param = np.log(param)
param.attach_grad()
net = TestRelaxedOneHotCategorical(
"sample", use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
with autograd.record():
mx_out = net(param)
mx_out.backward()
desired_shape = batch_shape if batch_shape is not None else ()
assert mx_out.shape == desired_shape + (event_shape,)
assert param.grad.shape == param.shape
# Test log_prob
for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
for use_logit, hybridize in itertools.product([True, False], [False]):
prob = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=batch_shape))
eps = _np.finfo('float32').eps
prob = np.clip(prob, eps, 1 - eps)
param = prob
desired_shape = sample_shape + \
(batch_shape if batch_shape is not None else ())
# Samples from a Relaxed One-hot Categorical lie on a simplex.
samples = np.array(_np.random.dirichlet(
[1 / event_shape] * event_shape, size=desired_shape))
if use_logit:
param = np.log(param)
net = TestRelaxedOneHotCategorical(
"log_prob", use_logit, batch_shape, event_shape)
if hybridize:
net.hybridize()
mx_out = net(param, samples)
# Check shape
assert mx_out.shape == desired_shape
@use_np
def test_gluon_mvn_v1():
class TestMVN(HybridBlock):
def __init__(self, func, param_type):
super(TestMVN, self).__init__()
self._func = func
# cov, precision or scale_tril
self._param_type = param_type
def hybrid_forward(self, F, loc, cov, *args):
mvn = mgp.MultivariateNormal(loc=loc, **{self._param_type: cov},
validate_args=True)
return _distribution_method_invoker(mvn, self._func, *args)
def _stable_inv(cov):
"""
Force the precision matrix to be symmetric.
"""
precision = np.linalg.inv(cov)
precision_t = np.swapaxes(precision, -1, -2)
return (precision + precision_t) / 2
event_shapes = [3, 5]
loc_shapes = [(), (2,), (4, 2)]
cov_shapes = [(), (2,), (4, 2)]
cov_func = {
'cov': lambda s: s,
'precision': lambda s: _stable_inv(s),
'scale_tril': lambda s: np.linalg.cholesky(s)
}
# Test sampling
for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
for cov_type in cov_func.keys():
for hybridize in [False]:
loc = np.random.randn(*(loc_shape + (event_shape,)))
_s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
loc.attach_grad()
_s.attach_grad()
# Full covariance matrix
sigma = np.matmul(_s, np.swapaxes(
_s, -1, -2)) + np.eye(event_shape)
cov_param = cov_func[cov_type](sigma)
net = TestMVN('sample', cov_type)
if hybridize:
net.hybridize()
with autograd.record():
mx_out = net(loc, cov_param)
desired_shape = (loc + sigma[..., 0]).shape
assert mx_out.shape == desired_shape
mx_out.backward()
assert loc.grad.shape == loc.shape
assert _s.grad.shape == _s.shape
# Test log_prob
for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
for cov_type in cov_func.keys():
for hybridize in [True, False]:
loc = np.random.randn(*(loc_shape + (event_shape,)))
_s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
samples = np.random.normal(
np.zeros_like(loc), np.ones_like(_s[..., 0]))
loc.attach_grad()
_s.attach_grad()
# Full covariance matrix
sigma = np.matmul(_s, np.swapaxes(
_s, -1, -2)) + np.eye(event_shape)
cov_param = cov_func[cov_type](sigma)
net = TestMVN('log_prob', cov_type)
if hybridize:
net.hybridize()
mx_out = net(loc, cov_param, samples)
assert mx_out.shape == samples.shape[:-1]
if mx_out.shape == ():
mx_out_t = mx_out.asnumpy()
else:
mx_out_t = mx_out.flatten()[0].asnumpy()
samples_t = samples.reshape(-1, event_shape).asnumpy()[0]
# Select the first element in the batch, because scipy does not support batching.
loc_t = loc.reshape(-1, event_shape)[0].asnumpy()
sigma_t = sigma.reshape(-1, event_shape,
event_shape)[0].asnumpy()
scipy_mvn = ss.multivariate_normal(loc_t, sigma_t)
ss_out = scipy_mvn.logpdf(samples_t)
assert_almost_equal(mx_out_t, ss_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test entropy
for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
for cov_type in cov_func.keys():
for hybridize in [True, False]:
loc = np.random.randn(*(loc_shape + (event_shape,)))
_s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
loc.attach_grad()
_s.attach_grad()
# Full covariance matrix
sigma = np.matmul(_s, np.swapaxes(
_s, -1, -2)) + np.eye(event_shape)
cov_param = cov_func[cov_type](sigma)
net = TestMVN('entropy', cov_type)
if hybridize:
net.hybridize()
mx_out = net(loc, cov_param)
assert mx_out.shape == sigma.shape[:-2]
if mx_out.shape == ():
mx_out_t = mx_out.asnumpy()
else:
mx_out_t = mx_out.flatten()[0].asnumpy()
# Select the first element in the batch, because scipy does not support batching.
loc_t = loc.reshape(-1, event_shape)[0].asnumpy()
sigma_t = sigma.reshape(-1, event_shape,
event_shape)[0].asnumpy()
scipy_mvn = ss.multivariate_normal(loc_t, sigma_t)
ss_out = scipy_mvn.entropy()
assert_almost_equal(mx_out_t, ss_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
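# Hedged cross-check of the entropy compared above: for a multivariate normal
# with covariance sigma, H = 0.5 * log(det(2 * pi * e * sigma)). The matrix
# below is illustrative only.
import numpy as _onp
import scipy.stats as _sss
_sigma = _onp.array([[2.0, 0.3], [0.3, 1.0]])
_closed_form = 0.5 * _onp.log(_onp.linalg.det(2 * _onp.pi * _onp.e * _sigma))
assert abs(_sss.multivariate_normal([0, 0], _sigma).entropy() - _closed_form) < 1e-10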
@use_np
def test_gluon_half_normal_v1():
class TestHalfNormal(HybridBlock):
def __init__(self, func):
super(TestHalfNormal, self).__init__()
self._func = func
def hybrid_forward(self, F, scale, *args):
half_normal = mgp.HalfNormal(scale, F, validate_args=True)
return getattr(half_normal, self._func)(*args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
net = TestHalfNormal("sample")
if hybridize:
net.hybridize()
mx_out = net(scale).asnumpy()
if isinstance(shape, Number):
shape = (shape,)
assert mx_out.shape == shape
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfNormal("log_prob")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfnorm(0, scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfNormal("cdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfnorm(0, scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.random.uniform(size=shape)
net = TestHalfNormal("icdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfnorm(0, scale.asnumpy()).ppf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
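# Hedged note on the identity behind the half-normal checks above: for x >= 0
# the half-normal density is twice the corresponding normal density.
import numpy as _onp
import scipy.stats as _sss
_x, _scale = 0.7, 1.3
assert abs(_sss.halfnorm(0, _scale).logpdf(_x)
           - (_onp.log(2) + _sss.norm(0, _scale).logpdf(_x))) < 1e-12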
@use_np
def test_affine_transform_v1():
r"""
Test the correctness of affine transformation by performing it
on a standard normal, since N(\mu, \sigma^2) = \mu + \sigma * N(0, 1)
"""
class TestAffineTransform(HybridBlock):
def __init__(self, func):
super(TestAffineTransform, self).__init__()
self._func = func
def hybrid_forward(self, F, loc, scale, *args):
std_normal = mgp.Normal(F.np.zeros_like(loc),
F.np.ones_like(scale), F)
transforms = [mgp.AffineTransform(loc=0, scale=scale),
mgp.AffineTransform(loc=loc, scale=1)]
transformed_normal = mgp.TransformedDistribution(
std_normal, transforms)
if (len(args) == 0):
return getattr(transformed_normal, self._func)
return getattr(transformed_normal, self._func)(*args)
shapes = [(1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
loc.attach_grad()
scale = np.random.uniform(0.5, 1.5, shape)
scale.attach_grad()
samples = np.random.normal(size=shape)
net = TestAffineTransform('log_prob')
if hybridize:
net.hybridize()
with autograd.record():
mx_out = net(loc, scale, samples)
np_out = _np.log(ss.norm(loc.asnumpy(),
scale.asnumpy()).pdf(samples.asnumpy()))
assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
mx_out.backward()
loc_expected_grad = ((samples - loc) / scale ** 2).asnumpy()
scale_expected_grad = (samples - loc) ** 2 * \
np.power(scale, -3) - (1 / scale)
assert_almost_equal(loc.grad.asnumpy(), loc_expected_grad, atol=1e-4,
rtol=1e-3, use_broadcast=False)
assert_almost_equal(scale.grad.asnumpy(), scale_expected_grad, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
loc.attach_grad()
scale = np.random.uniform(0.5, 1.5, shape)
scale.attach_grad()
if not isinstance(shape, tuple):
shape = (shape,)
expected_shape = (4, 5) + shape
net = TestAffineTransform('sample')
mx_out = net(loc, scale, expected_shape).asnumpy()
assert mx_out.shape == expected_shape
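# Hedged sketch of the change-of-variables rule the affine-transform test
# exercises: if Y = loc + scale * X, then
# log p_Y(y) = log p_X((y - loc) / scale) - log|scale|. Values illustrative.
import numpy as _onp
import scipy.stats as _sss
_loc, _scale, _y = 0.5, 1.5, 0.2
_lhs = _sss.norm(_loc, _scale).logpdf(_y)
_rhs = _sss.norm(0, 1).logpdf((_y - _loc) / _scale) - _onp.log(_scale)
assert abs(_lhs - _rhs) < 1e-12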
@use_np
def test_compose_transform_v1():
class TestComposeTransform(HybridBlock):
def __init__(self, func):
super(TestComposeTransform, self).__init__()
self._func = func
def hybrid_forward(self, F, loc, scale, *args):
# Generate a log_normal distribution.
std_normal = mgp.Normal(F.np.zeros_like(loc),
F.np.ones_like(scale), F)
transforms = mgp.ComposeTransform([
mgp.AffineTransform(loc=0, scale=scale),
mgp.AffineTransform(loc=loc, scale=1),
mgp.ExpTransform()
])
transformed_normal = mgp.TransformedDistribution(
std_normal, transforms)
if (len(args) == 0):
return getattr(transformed_normal, self._func)
return getattr(transformed_normal, self._func)(*args)
shapes = [(1,), (2, 3), 6]
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
loc = np.random.uniform(-1, 1, shape)
loc.attach_grad()
scale = np.random.uniform(0.5, 1.5, shape)
scale.attach_grad()
samples = np.random.uniform(1, 2, size=shape)
net = TestComposeTransform('log_prob')
if hybridize:
net.hybridize()
with autograd.record():
mx_out = net(loc, scale, samples)
np_out = ss.lognorm(s=scale.asnumpy(), scale=np.exp(
loc).asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
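# Hedged note on the composition above: exp(loc + scale * N(0, 1)) is
# log-normal, which scipy parameterizes as lognorm(s=scale, scale=exp(loc)).
# The extra -log(x) term is the ExpTransform's log-Jacobian.
import numpy as _onp
import scipy.stats as _sss
_loc, _scale, _x = 0.3, 0.8, 1.7
_lhs = _sss.lognorm(s=_scale, scale=_onp.exp(_loc)).logpdf(_x)
_rhs = _sss.norm(_loc, _scale).logpdf(_onp.log(_x)) - _onp.log(_x)
assert abs(_lhs - _rhs) < 1e-12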
@use_np
def test_cached_property_v1():
x = np.random.normal()
x.attach_grad()
scale = 0.1
class Dummy(object):
def __init__(self, x):
super(Dummy, self).__init__()
self.x = x
@mgp.cached_property
def y(self):
return scale * self.x + 1
with autograd.record():
obj = Dummy(x)
obj.y.backward()
assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
class DummyBlock(HybridBlock):
def hybrid_forward(self, F, x):
obj = Dummy(x)
return obj.y
x = np.random.normal()
x.attach_grad()
net = DummyBlock()
with autograd.record():
y = net(x)
y.backward()
assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
x = np.random.normal()
x.attach_grad()
net.hybridize()
with autograd.record():
y = net(x)
y.backward()
assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
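# Hedged sketch of what a cached-property decorator typically does: evaluate
# the wrapped method once on first access and cache the result on the
# instance. This is an illustration, not mgp.cached_property's exact code.
class _CachedPropertySketch(object):
    def __init__(self, func):
        self._func = func
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self._func(obj)
        # A non-data descriptor is shadowed by the instance attribute, so
        # later accesses skip this __get__ entirely.
        obj.__dict__[self._func.__name__] = value
        return value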
@use_np
def test_independent_v1():
class TestIndependent(HybridBlock):
def __init__(self, event_dim, func):
super(TestIndependent, self).__init__()
self._event_dim = event_dim
self._func = func
def hybrid_forward(self, F, logit, *args):
base_dist = mgp.Bernoulli(logit=logit)
reshaped_dist = mgp.Independent(base_dist, self._event_dim)
return getattr(reshaped_dist, self._func)(*args)
event_shapes = [(1,), (4,), (2, 2)]
batch_shapes = [(2, 3), (2,)]
for (batch_shape, event_shape) in itertools.product(batch_shapes, event_shapes):
for hybridize in [False, True]:
for func in ['log_prob']:
full_shape = batch_shape + event_shape
logit = np.random.normal(0, 2, size=full_shape)
samples = np.round(np.random.uniform(size=full_shape))
net = TestIndependent(len(event_shape), func)
if hybridize:
net.hybridize()
mx_out = net(logit, samples)
assert mx_out.shape == batch_shape
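# Hedged reference for the shape contract above: Independent reinterprets the
# trailing `event_dim` axes as one event, so its log_prob is the base
# distribution's log_prob summed over those axes. Names illustrative only.
import numpy as _onp
_logp = _onp.random.randn(2, 3, 4)           # batch (2, 3), event (4,)
_event_dim = 1
_joint = _logp.sum(axis=tuple(range(-_event_dim, 0)))
assert _joint.shape == (2, 3)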
@use_np
def test_gluon_kl_v1():
def _test_zero_kl(p, shape):
"""Check if KL(p || p) = 0
Parameters
----------
p : Distribution
"""
mx_out = mgp.kl_divergence(p, p).asnumpy()
np_out = _np.zeros(shape)
assert_almost_equal(mx_out, np_out, atol=1e-3,
rtol=1e-2, use_broadcast=False)
def _test_monte_carlo(p, q, M=50000):
r"""Check if KL(p || q) is approximately equal to
1/M * \Sum_{i=1}^{M} log(p(x_i) / q(x_i)), x_i ~ p(x)
"""
kl = mgp.kl_divergence(p, q)
mc_approx = mgp.empirical_kl(p, q, M)
assert_almost_equal(mc_approx.asnumpy(), kl.asnumpy(), atol=1e-1,
rtol=1e-1, use_broadcast=False)
def _dist_factory(dist, *param_funcs):
"""Generate a distribution object with parameters of random value.
Parameters
----------
dist : Type
A type of distribution.
param_funcs : List
A list of functions that generate valid parameters for `dist`
"""
params = [f() if callable(f) else f for f in param_funcs]
return dist(*params)
# Monte Carlo checks are disabled by default: they significantly increase
# runtime and can make the tests flaky.
monte_carlo_test = False
repeated_times = 50000
shapes = [(), (1,), (2, 3), 6]
# Test kl between same distributions
# uniform
for shape in shapes:
dist = mgp.Uniform
def low(): return np.random.uniform(0, 1, shape)
def high(): return np.random.uniform(1, 2, shape)
_test_zero_kl(_dist_factory(dist, low, high), shape)
# normal, laplace, cauchy, gumbel
for dist in [mgp.Normal, mgp.Laplace, mgp.Cauchy, mgp.Gumbel]:
for shape in shapes:
def loc(): return np.random.uniform(-1, 1, shape)
def scale(): return np.random.uniform(0.5, 1.5, shape)
_test_zero_kl(_dist_factory(dist, loc, scale), shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, loc, scale),
_dist_factory(dist, loc, scale),
repeated_times)
# poisson
for shape in shapes[1:]:
dist = mgp.Poisson
def rate(): return np.random.uniform(0.5, 1.5, shape)
_test_zero_kl(_dist_factory(dist, rate), shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, rate),
_dist_factory(dist, rate),
repeated_times)
# exponential, geometric
for dist in [mgp.Exponential, mgp.Geometric]:
for shape in shapes:
def s(): return np.random.uniform(size=shape, low=1e-3)
_test_zero_kl(_dist_factory(dist, s), shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, s),
_dist_factory(dist, s),
repeated_times)
# pareto
for shape in shapes:
dist = mgp.Pareto
def alpha(): return np.random.uniform(size=shape)
def scale(): return np.random.uniform(size=shape)
_test_zero_kl(_dist_factory(dist, alpha, scale), shape)
for shape in shapes:
dist = mgp.HalfNormal
def scale(): return np.random.uniform(0.5, 1.5, shape)
_test_zero_kl(_dist_factory(dist, scale), shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, scale),
_dist_factory(dist, scale),
repeated_times)
# gamma, beta
for dist in [mgp.Gamma, mgp.Beta]:
for shape in shapes:
def param1(): return np.random.uniform(0.5, 1.5, shape)
def param2(): return np.random.uniform(0.5, 1.5, shape)
_test_zero_kl(_dist_factory(dist, param1, param2), shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, param1, param2),
_dist_factory(dist, param1, param2),
50000)
# binomial
for shape in shapes:
n = _np.random.randint(5, 10)
prob = np.random.uniform(low=0.1, size=shape)
dist = mgp.Binomial(n=n, prob=prob)
_test_zero_kl(dist, shape)
# bernoulli
for shape in shapes:
prob = np.random.uniform(size=shape)
dist = mgp.Bernoulli(prob=prob)
_test_zero_kl(dist, shape)
event_shapes = [3, 5, 10]
loc_shapes = [(), (2,), (4, 2)]
cov_shapes = [(), (2,), (4, 2)]
for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
loc = np.random.randn(*(loc_shape + (event_shape,)))
_s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
sigma = np.matmul(_s, np.swapaxes(_s, -1, -2)) + np.eye(event_shape)
dist = mgp.MultivariateNormal(loc, cov=sigma)
desired_shape = (loc + sigma[..., 0]).shape[:-1]
_test_zero_kl(dist, desired_shape)
batch_shapes = loc_shapes
# dirichlet
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
desired_shape = (batch_shape if batch_shape is not None else ())
dist = mgp.Dirichlet
def alpha(): return np.random.uniform(
0.5, 1.5, size=(desired_shape + (event_shape,)))
_test_zero_kl(_dist_factory(dist, alpha), desired_shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, alpha),
_dist_factory(dist, alpha),
50000)
# categorical, One-hot categorical
for dist in [mgp.Categorical, mgp.OneHotCategorical]:
for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
prob = (lambda:
np.array(_np.random.dirichlet([1 / event_shape] * event_shape, size=batch_shape)))
_test_zero_kl(_dist_factory(dist, event_shape, prob), batch_shape)
if monte_carlo_test:
_test_monte_carlo(_dist_factory(dist, event_shape, prob),
_dist_factory(dist, event_shape, prob),
repeated_times)
# Test kl between different distributions
# KL(Uniform || ...)
for shape in shapes:
rhs_dists = [
mgp.Normal(np.random.uniform(-1, 1, shape),
np.random.uniform(0.5, 1.5, shape)),
mgp.Gumbel(np.random.uniform(-1, 1, shape),
np.random.uniform(0.5, 1.5, shape)),
]
for rhs_dist in rhs_dists:
low = np.random.uniform(-1, 1, shape)
high = low + np.random.uniform(0.5, 1.5, shape)
lhs_dist = mgp.Uniform(low, high)
kl = mgp.kl_divergence(lhs_dist, rhs_dist)
assert kl.shape == low.shape
if monte_carlo_test:
_test_monte_carlo(lhs_dist, rhs_dist, repeated_times)
# KL(Exponential || ...)
for shape in shapes:
rhs_dists = [
mgp.Normal(np.random.uniform(-1, 1, shape),
np.random.uniform(0.5, 1.5, shape)),
mgp.Gumbel(np.random.uniform(-1, 1, shape),
np.random.uniform(0.5, 1.5, shape)),
mgp.Gamma(np.random.uniform(0.5, 1.5, shape),
np.random.uniform(0.5, 1.5, shape))
]
for rhs_dist in rhs_dists:
s = np.random.uniform(size=shape)
lhs_dist = mgp.Exponential(s)
kl = mgp.kl_divergence(lhs_dist, rhs_dist)
assert kl.shape == s.shape
if monte_carlo_test:
_test_monte_carlo(lhs_dist, rhs_dist, repeated_times)
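# Hedged sketch of the Monte Carlo estimator _test_monte_carlo relies on:
# KL(p || q) ~= mean(log p(x_i) - log q(x_i)) for x_i ~ p, checked here
# against the closed form for two univariate normals. Values illustrative.
import numpy as _onp
import scipy.stats as _sss
_rng = _onp.random.RandomState(0)
_pd, _qd = _sss.norm(0.0, 1.0), _sss.norm(0.5, 1.5)
_x = _pd.rvs(size=200000, random_state=_rng)
_mc = _onp.mean(_pd.logpdf(_x) - _qd.logpdf(_x))
_closed = _onp.log(1.5 / 1.0) + (1.0 ** 2 + 0.5 ** 2) / (2 * 1.5 ** 2) - 0.5
assert abs(_mc - _closed) < 1e-2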
@pytest.mark.garbage_expected
@use_np
def test_gluon_stochastic_block_v1():
class dummyBlock(StochasticBlock):
"""In this test case, we generate samples from a Gaussian parameterized
by `loc` and `scale` and accumulate the KL-divergence between it and
its prior and the l2 norm of `loc` into the block's loss storage."""
@StochasticBlock.collectLoss
def hybrid_forward(self, F, loc, scale):
qz = mgp.Normal(loc, scale)
# prior
pz = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale))
self.add_loss(mgp.kl_divergence(qz, pz))
self.add_loss((loc ** 2).sum(1))
return qz.sample()
shape = (4, 4)
for hybridize in [True, False]:
net = dummyBlock()
if hybridize:
net.hybridize()
loc = np.random.randn(*shape)
scale = np.random.rand(*shape)
mx_out = net(loc, scale).asnumpy()
kl = net.losses[0].asnumpy()
l2_norm = net.losses[1].asnumpy()
assert mx_out.shape == loc.shape
assert kl.shape == loc.shape
assert l2_norm.shape == shape[:-1]
@use_np
def test_gluon_stochastic_block_exception_v1():
class problemBlock(StochasticBlock):
def hybrid_forward(self, F, loc, scale):
qz = mgp.Normal(loc, scale)
# prior
pz = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale))
self.add_loss(mgp.kl_divergence(qz, pz))
self.add_loss((loc ** 2).sum(1))
return qz.sample()
shape = (4, 4)
for hybridize in [True, False]:
net = problemBlock()
if hybridize:
net.hybridize()
loc = np.random.randn(*shape)
scale = np.random.rand(*shape)
with pytest.raises(ValueError):
mx_out = net(loc, scale).asnumpy()
@pytest.mark.garbage_expected
@use_np
def test_gluon_stochastic_sequential_v1():
class normalBlock(HybridBlock):
def hybrid_forward(self, F, x):
return (x + 1)
class stochasticBlock(StochasticBlock):
@StochasticBlock.collectLoss
def hybrid_forward(self, F, x):
self.add_loss(x ** 2)
self.add_loss(x - 1)
return (x + 1)
class problemBlock(StochasticBlock):
def hybrid_forward(self, F, x):
self.add_loss(x ** 2)
self.add_loss(x - 1)
return (x + 1)
shape = (4, 4)
for hybridize in [True, False]:
initial_value = np.ones(shape)
net = StochasticSequential()
net.add(stochasticBlock())
net.add(normalBlock())
net.add(stochasticBlock())
net.add(normalBlock())
if hybridize:
net.hybridize()
mx_out = net(initial_value).asnumpy()
assert_almost_equal(mx_out, _np.ones(shape) * 5)
accumulated_loss = net.losses
assert len(accumulated_loss) == 2
assert_almost_equal(accumulated_loss[0][0].asnumpy(), _np.ones(shape))
assert_almost_equal(
accumulated_loss[0][1].asnumpy(), _np.ones(shape) - 1)
assert_almost_equal(
accumulated_loss[1][0].asnumpy(), _np.ones(shape) * 9)
assert_almost_equal(
accumulated_loss[1][1].asnumpy(), _np.ones(shape) + 1)
for hybridize in [True, False]:
initial_value = np.ones(shape)
net = StochasticSequential()
net.add(stochasticBlock())
net.add(normalBlock())
net.add(problemBlock())
net.add(normalBlock())
if hybridize:
net.hybridize()
with pytest.raises(ValueError):
mx_out = net(initial_value).asnumpy()
@use_np
def test_gluon_constraint_v1():
class TestConstraint(HybridBlock):
def __init__(self, constraint_type):
super(TestConstraint, self).__init__()
self._constraint_type = getattr(mgp.constraint, constraint_type)
def hybrid_forward(self, F, *params):
value = params[0]
constraint_param = params[1:]
if len(constraint_param) == 0:
constraint = self._constraint_type()
else:
constraint = self._constraint_type(*constraint_param)
return constraint.check(value)
_s = np.random.randn(5, 10, 10)
psd_matrix = np.matmul(_s, np.swapaxes(_s, -1, -2)) + np.eye(_s.shape[-1])
constraints_zoo = [
# (constraint_type, constraint_param, test_samples)
('Real', (), [np.random.randn(2, 2)]),
('Boolean', (), [np.random.randint(0, 20, size=(2, 2)) % 2 == 0]),
('Interval', [np.zeros((2, 2)), np.ones(
(2, 2))], [np.random.rand(2, 2)]),
('OpenInterval', [np.zeros((2, 2)), np.ones(
(2, 2))], [np.random.rand(2, 2)]),
('HalfOpenInterval', [np.zeros((2, 2)),
np.ones((2, 2))], [np.random.rand(2, 2)]),
('IntegerInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10],
[np.random.randint(0, 10, size=(2, 2)).astype('float32')]),
('IntegerOpenInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10],
[np.random.randint(1, 9, size=(2, 2)).astype('float32')]),
('IntegerHalfOpenInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10],
[np.random.randint(1, 9, size=(2, 2)).astype('float32')]),
('GreaterThan', [np.zeros((2, 2))], [np.random.rand(2, 2)]),
('GreaterThanEq', [np.zeros((2, 2))], [np.random.rand(2, 2)]),
('LessThan', [np.ones((2, 2))], [np.random.rand(2, 2)]),
('LessThanEq', [np.ones((2, 2))], [np.random.rand(2, 2)]),
('IntegerGreaterThan', [np.zeros((2, 2))],
[np.random.randint(1, 10, size=(2, 2)).astype('float32')]),
('IntegerGreaterThanEq', [np.zeros((2, 2))],
[np.random.randint(0, 10, size=(2, 2)).astype('float32')]),
('IntegerLessThan', [np.ones((2, 2)) * 10],
[np.random.randint(0, 9, size=(2, 2)).astype('float32')]),
('IntegerLessThanEq', [np.ones((2, 2)) * 10],
[np.random.randint(0, 10, size=(2, 2)).astype('float32')]),
('Positive', (), [np.random.rand(2, 2)]),
('NonNegative', (), [np.random.rand(2, 2)]),
('PositiveInteger', (), [np.random.randint(
1, 5, size=(2, 2)).astype('float32')]),
('NonNegativeInteger', (), [np.random.randint(
0, 5, size=(2, 2)).astype('float32')]),
('Simplex', (), [npx.softmax(np.random.randn(4, 4), axis=-1)]),
('LowerTriangular', (), [np.tril(np.random.randn(5, 3, 3))]),
('LowerCholesky', (), [np.linalg.cholesky(psd_matrix)]),
('PositiveDefinite', (), [psd_matrix]),
]
for (constraint_type, constraint_arg, test_samples) in constraints_zoo:
for hybridize in [True, False]:
net = TestConstraint(constraint_type)
if hybridize:
net.hybridize()
for test_sample in test_samples:
mx_out = net(test_sample, *constraint_arg).asnumpy()
assert_almost_equal(mx_out, test_sample.asnumpy())
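# Hedged sketch of what an Interval constraint check computes: elementwise
# membership of the value in (low, high). Variable names illustrative only.
import numpy as _onp
_low, _high = _onp.zeros((2, 2)), _onp.ones((2, 2))
_value = _onp.random.rand(2, 2)
assert bool(((_value > _low) & (_value < _high)).all())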
@use_np
def test_gluon_domain_map_v1():
class TestDomainMap(HybridBlock):
def __init__(self, constraint_type, bijective):
super(TestDomainMap, self).__init__()
self._constraint_type = getattr(mgp.constraint, constraint_type)
def hybrid_forward(self, F, *params):
value = params[0]
constraint_param = params[1:]
if len(constraint_param) == 0:
constraint = self._constraint_type()
else:
constraint = self._constraint_type(*constraint_param)
if bijective:
bijector = mgp.biject_to(constraint)
bijector.F = F
value = bijector(value)
else:
transformation = mgp.transform_to(constraint)
transformation.F = F
value = transformation(value)
return (value, constraint.check(value))
constraints_zoo = [
# (constraint_type, constraint_param)
('Positive', ()),
('GreaterThan', [np.random.randn(2, 2)]),
('GreaterThanEq', [np.random.randn(2, 2)]),
('LessThan', [np.random.randn(2, 2)]),
('Interval', [np.random.uniform(0, 1, (2, 2)),
np.random.uniform(2, 3, (2, 2))]),
('HalfOpenInterval', [np.random.uniform(
0, 1, (2, 2)), np.random.uniform(2, 3, (2, 2))])
]
test_sample = np.random.randn(2, 2)
for (constraint_type, constraint_arg) in constraints_zoo:
for bijective in [True, False]:
for hybridize in [True, False]:
net = TestDomainMap(constraint_type, bijective)
if hybridize:
net.hybridize()
constrained_out, constraint_status = net(
test_sample, *constraint_arg)
assert_almost_equal(constrained_out.asnumpy(),
constraint_status.asnumpy())
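# Hedged sketch of one common domain mapping for a Positive constraint: an
# exp bijector sends any unconstrained real value into (0, inf). This
# illustrates the round trip checked above, not mgp.biject_to's exact choice.
import numpy as _onp
_unconstrained = _onp.random.randn(2, 2)
_constrained = _onp.exp(_unconstrained)
assert bool((_constrained > 0).all())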
| 40.768943
| 111
| 0.573182
| 12,012
| 97,927
| 4.475441
| 0.041292
| 0.038245
| 0.046597
| 0.038933
| 0.818282
| 0.788445
| 0.754274
| 0.730259
| 0.716531
| 0.683011
| 0
| 0.019878
| 0.306494
| 97,927
| 2,401
| 112
| 40.785923
| 0.771708
| 0.040918
| 0
| 0.670732
| 0
| 0
| 0.014205
| 0.000246
| 0
| 0
| 0
| 0
| 0.060467
| 1
| 0.065041
| false
| 0
| 0.007114
| 0.009146
| 0.119919
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0aefd1e832608cb0768f999e50963866ceec5d5a
| 33
|
py
|
Python
|
odoo-13.0/venv/lib/python3.8/site-packages/PcxImagePlugin.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 2
|
2021-06-20T16:56:45.000Z
|
2021-06-20T17:30:18.000Z
|
odoo-13.0/venv/lib/python3.8/site-packages/PcxImagePlugin.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/venv/lib/python3.8/site-packages/PcxImagePlugin.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
from PIL.PcxImagePlugin import *
| 16.5
| 32
| 0.818182
| 4
| 33
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c20a5d0a36282a85e330e3fffbdb943e39489e5
| 5,387
|
py
|
Python
|
multiworld/envs/mujoco/cameras.py
|
shikharbahl/multiworld
|
85b3200dc9a5821754c2d8ba2b8a7b6add874828
|
[
"MIT"
] | 1
|
2019-01-30T20:55:15.000Z
|
2019-01-30T20:55:15.000Z
|
multiworld/envs/mujoco/cameras.py
|
shikharbahl/multiworld
|
85b3200dc9a5821754c2d8ba2b8a7b6add874828
|
[
"MIT"
] | null | null | null |
multiworld/envs/mujoco/cameras.py
|
shikharbahl/multiworld
|
85b3200dc9a5821754c2d8ba2b8a7b6add874828
|
[
"MIT"
] | null | null | null |
import numpy as np
def create_sawyer_camera_init(
lookat=(0, 0.85, 0.3),
distance=0.3,
elevation=-35,
azimuth=270,
trackbodyid=-1,
):
def init(camera):
camera.lookat[0] = lookat[0]
camera.lookat[1] = lookat[1]
camera.lookat[2] = lookat[2]
camera.distance = distance
camera.elevation = elevation
camera.azimuth = azimuth
camera.trackbodyid = trackbodyid
return init
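# Hedged usage sketch for the factory above; `DummyCamera` is illustrative,
# standing in for a mujoco camera object with the attributes init() assigns.
class DummyCamera(object):
    def __init__(self):
        self.lookat = [0.0, 0.0, 0.0]
        self.distance = self.elevation = self.azimuth = self.trackbodyid = None
_init = create_sawyer_camera_init(distance=0.4)
_cam = DummyCamera()
_init(_cam)
assert _cam.distance == 0.4 and _cam.lookat[1] == 0.85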
def init_sawyer_camera_v1(camera):
"""
Do not get so close that the arm crosses the camera plane.
"""
camera.lookat[0] = 0
camera.lookat[1] = 1
camera.lookat[2] = 0.3
camera.distance = 0.35
camera.elevation = -35
camera.azimuth = 270
camera.trackbodyid = -1
def init_sawyer_camera_v2(camera):
"""
Basically top down; sees through the arm.
"""
camera.lookat[0] = 0
camera.lookat[1] = 0.8
camera.lookat[2] = 0.3
camera.distance = 0.3
camera.elevation = -65
camera.azimuth = 270
camera.trackbodyid = -1
def init_sawyer_camera_v3(camera):
"""
Basically top down; sees through the arm.
"""
camera.lookat[0] = 0
camera.lookat[1] = 0.85
camera.lookat[2] = 0.3
camera.distance = 0.3
camera.elevation = -35
camera.azimuth = 270
camera.trackbodyid = -1
def sawyer_pick_and_place_camera(camera):
camera.lookat[0] = 0.0
camera.lookat[1] = .67
camera.lookat[2] = .1
camera.distance = .7
camera.elevation = 0
camera.azimuth = 180
camera.trackbodyid = 0
def init_sawyer_camera_v4(camera):
"""
This is the same camera used in old experiments (circa 6/7/2018)
"""
camera.lookat[0] = 0
camera.lookat[1] = 0.85
camera.lookat[2] = 0.3
camera.distance = 0.3
camera.elevation = -35
camera.azimuth = 270
camera.trackbodyid = -1
def sawyer_pick_and_place_camera_slanted_angle(camera):
camera.lookat[0] = 0.0
camera.lookat[1] = .67
camera.lookat[2] = .1
camera.distance = .65
camera.elevation = -37.85
camera.azimuth = 180
camera.trackbodyid = 0
def init_sawyer_camera_v5(camera):
"""
Purposely zoomed out to be hard.
"""
camera.lookat[0] = 0
camera.lookat[1] = 0.85
camera.lookat[2] = 0.3
camera.distance = 1
camera.elevation = -35
camera.azimuth = 270
camera.trackbodyid = -1
def sawyer_xyz_reacher_camera(camera):
# TODO: reformat or delete
camera.trackbodyid = 0
camera.distance = 1.0
# 3rd person view
cam_dist = 0.3
rotation_angle = 270
cam_pos = np.array([0, 1.0, 0.5, cam_dist, -30, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
def sawyer_torque_reacher_camera(camera):
# TODO: reformat or delete
camera.trackbodyid = 0
camera.distance = 1.0
# 3rd person view
cam_dist = 0.3
rotation_angle = 270
cam_pos = np.array([0, 1.0, 0.65, cam_dist, -30, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
def sawyer_door_env_camera(camera):
camera.trackbodyid = 0
camera.distance = 1.0
cam_dist = 0.1
rotation_angle = 0
cam_pos = np.array([0, 0.725, .9, cam_dist, -90, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
def sawyer_door_env_camera_v2(camera):
camera.trackbodyid = 0
camera.distance = 1.0
cam_dist = 0.1
rotation_angle = 0
cam_pos = np.array([.1, 0.55, .9, cam_dist, -90, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
def sawyer_door_env_camera_v3(camera):
camera.trackbodyid = 0
camera.distance = 1.0
# 3rd person view
cam_dist = 0.25
rotation_angle = 360
cam_pos = np.array([-.2, .55, 0.6, cam_dist, -60, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
def sawyer_pusher_camera_upright(camera):
camera.trackbodyid = 0
camera.distance = .45
camera.lookat[0] = 0
camera.lookat[1] = 0.85
camera.lookat[2] = 0.45
camera.elevation = -50
camera.azimuth = 270
camera.trackbodyid = -1
def sawyer_pusher_camera_upright_v2(camera):
camera.trackbodyid = 0
camera.distance = .45
camera.lookat[0] = 0
camera.lookat[1] = 0.85
camera.lookat[2] = 0.45
camera.elevation = -60
camera.azimuth = 270
camera.trackbodyid = -1
def sawyer_pusher_camera_top_down(camera):
camera.trackbodyid = 0
cam_dist = 0.1
rotation_angle = 0
cam_pos = np.array([0, 0.6, .9, cam_dist, -90, rotation_angle])
for i in range(3):
camera.lookat[i] = cam_pos[i]
camera.distance = cam_pos[3]
camera.elevation = cam_pos[4]
camera.azimuth = cam_pos[5]
camera.trackbodyid = -1
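# Hedged refactoring sketch (not part of the original module): the
# initializers above differ only in a handful of numbers, so they could be
# collapsed into one helper like this.
def set_camera(camera, lookat, distance, elevation, azimuth, trackbodyid=-1):
    for i in range(3):
        camera.lookat[i] = lookat[i]
    camera.distance = distance
    camera.elevation = elevation
    camera.azimuth = azimuth
    camera.trackbodyid = trackbodyid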
| 25.055814
| 70
| 0.633377
| 792
| 5,387
| 4.169192
| 0.130051
| 0.13083
| 0.059055
| 0.076317
| 0.818595
| 0.818595
| 0.818595
| 0.809207
| 0.79043
| 0.79043
| 0
| 0.074686
| 0.246891
| 5,387
| 214
| 71
| 25.172897
| 0.739216
| 0.062744
| 0
| 0.685185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004673
| 0
| 1
| 0.104938
| false
| 0
| 0.006173
| 0
| 0.117284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c51fd04058be770247a4490232efadf8255b063
| 1,499
|
py
|
Python
|
NeoML/Python/test/svml.py
|
SAngeliuk/neoml_python
|
09e24dd726426ec880ff1793c287f03c3f1d362e
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-25T08:04:55.000Z
|
2020-12-25T08:04:55.000Z
|
NeoML/Python/test/svml.py
|
SAngeliuk/neoml_python
|
09e24dd726426ec880ff1793c287f03c3f1d362e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
NeoML/Python/test/svml.py
|
SAngeliuk/neoml_python
|
09e24dd726426ec880ff1793c287f03c3f1d362e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import numpy as np
from scipy.sparse import csr_matrix
def read(file_path, min_feature_count=0):
    # Parse an svmlight/libsvm-style file ("label idx:val idx:val ...") into
    # a CSR matrix and a list of integer labels. The file is now closed via a
    # context manager instead of being left open.
    data = []
    column = []
    row = [0]
    y = []
    rowCount = 0
    columnCount = 0
    elementCount = 0
    with open(file_path, "r") as file:
        for line in file:
            cur = line.split(" ")
            y.append(int(cur[0]))
            for i in range(1, len(cur)):
                item = cur[i].split(":")
                data.append(float(item[1]))
                column.append(int(item[0]))
                if int(item[0]) + 1 > columnCount:
                    columnCount = int(item[0]) + 1
                elementCount += 1
            rowCount += 1
            row.append(elementCount)
    X = csr_matrix((np.array(data, np.float32), np.array(column, np.int32), row),
                   shape=(rowCount, max(columnCount, min_feature_count)))
    return (X, y)
def correct(file_path):
    # Re-index an svmlight-style file, shifting labels and feature ids down
    # by one and writing the result to res.txt, while also building the
    # matrix. Both files are now closed via context managers, and each output
    # line is terminated explicitly (the original relied on the newline
    # embedded in the last feature value).
    data = []
    column = []
    row = [0]
    y = []
    rowCount = 0
    columnCount = 0
    elementCount = 0
    with open(file_path, "r") as file, open("res.txt", "w") as fileW:
        for line in file:
            cur = line.strip().split(" ")
            y.append(int(cur[0]))
            fileW.write(str(int(cur[0]) - 1))
            for i in range(1, len(cur)):
                item = cur[i].split(":")
                data.append(float(item[1]))
                column.append(int(item[0]))
                if int(item[0]) + 1 > columnCount:
                    columnCount = int(item[0]) + 1
                elementCount += 1
                s = str(int(item[0]) - 1) + ":" + item[1]
                fileW.write(" " + s)
            fileW.write("\n")
            rowCount += 1
            row.append(elementCount)
    X = csr_matrix((np.array(data, np.float32), np.array(column, np.int32), row),
                   shape=(rowCount, columnCount))
    return (X, y)
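# Hedged usage sketch for read(): it parses svmlight/libsvm-style lines such
# as "1 0:0.5 3:1.2". The temporary file below is illustrative only.
import os
import tempfile
_tmp = tempfile.NamedTemporaryFile("w", suffix=".svm", delete=False)
_tmp.write("1 0:0.5 3:1.2\n0 1:2.0\n")
_tmp.close()
_X, _y = read(_tmp.name)
assert _X.shape == (2, 4) and _y == [1, 0]
os.remove(_tmp.name)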
| 21.112676
| 143
| 0.587725
| 223
| 1,499
| 3.901345
| 0.246637
| 0.056322
| 0.064368
| 0.051724
| 0.751724
| 0.712644
| 0.712644
| 0.712644
| 0.712644
| 0.712644
| 0
| 0.036427
| 0.230821
| 1,499
| 70
| 144
| 21.414286
| 0.718127
| 0
| 0
| 0.807692
| 0
| 0
| 0.010681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.038462
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c8aa137d037945796fb4702c23c7eed165adb50
| 6,516
|
py
|
Python
|
tests/test_math/test_linalg.py
|
yeonsu-jung/PyElastica
|
fee87b9da22e310ff925c16fdc839bf8405c51a4
|
[
"MIT"
] | null | null | null |
tests/test_math/test_linalg.py
|
yeonsu-jung/PyElastica
|
fee87b9da22e310ff925c16fdc839bf8405c51a4
|
[
"MIT"
] | 1
|
2022-01-06T11:30:20.000Z
|
2022-02-07T07:11:22.000Z
|
tests/test_math/test_linalg.py
|
yeonsu-jung/PyElastica
|
fee87b9da22e310ff925c16fdc839bf8405c51a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
__doc__ = (
""" Test scripts for linear algebra helpers in Elastica Numba implementation"""
)
# System imports
import numpy as np
import pytest
from numpy.testing import assert_allclose
from elastica._linalg import (
_batch_matvec,
_batch_matmul,
_batch_cross,
_batch_vec_oneD_vec_cross,
_batch_dot,
_batch_norm,
_batch_product_i_k_to_ik,
_batch_product_i_ik_to_k,
_batch_product_k_ik_to_ik,
_batch_vector_sum,
_batch_matrix_transpose,
)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matvec(blocksize):
input_matrix_collection = np.random.randn(3, 3, blocksize)
input_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_matvec(
input_matrix_collection, input_vector_collection
)
correct_vector_collection = [
np.dot(input_matrix_collection[..., i], input_vector_collection[..., i])
for i in range(blocksize)
]
correct_vector_collection = np.array(correct_vector_collection).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matmul(blocksize):
input_first_matrix_collection = np.random.randn(3, 3, blocksize)
input_second_matrix_collection = np.random.randn(3, 3, blocksize)
test_matrix_collection = _batch_matmul(
input_first_matrix_collection, input_second_matrix_collection
)
correct_matrix_collection = np.empty((3, 3, blocksize))
for i in range(blocksize):
correct_matrix_collection[..., i] = np.dot(
input_first_matrix_collection[..., i],
input_second_matrix_collection[..., i],
)
assert_allclose(test_matrix_collection, correct_matrix_collection)
# TODO : Generalize to two dimensions
@pytest.mark.parametrize("dim", [3])
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_cross(dim, blocksize):
input_first_vector_collection = np.random.randn(dim, blocksize)
input_second_vector_collection = np.random.randn(dim, blocksize)
test_vector_collection = _batch_cross(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.cross(
input_first_vector_collection, input_second_vector_collection, axisa=0, axisb=0
).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_vec_oneD_vec_cross(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector = np.random.randn(3)
test_vector_collection = _batch_vec_oneD_vec_cross(
input_first_vector_collection, input_second_vector
)
correct_vector_collection = np.cross(
input_first_vector_collection, input_second_vector, axisa=0, axisb=0
).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_dot(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_dot(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"ij,ij->j", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_norm(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_norm(input_first_vector_collection)
correct_vector_collection = np.sqrt(
np.einsum(
"ij,ij->j", input_first_vector_collection, input_first_vector_collection
)
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_i_k_to_ik(blocksize):
input_first_vector_collection = np.random.randn(3)
input_second_vector_collection = np.random.randn(blocksize)
test_vector_collection = _batch_product_i_k_to_ik(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"i,j->ij", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_i_ik_to_k(blocksize):
input_first_vector_collection = np.random.randn(3)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_product_i_ik_to_k(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"i,ij->j", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_k_ik_to_ik(blocksize):
input_first_vector_collection = np.random.randn(blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_product_k_ik_to_ik(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"j,ij->ij", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_vector_sum(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_vector_sum(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = (
input_first_vector_collection + input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matrix_transpose(blocksize):
input_matrix_collection = np.random.randn(3, 3, blocksize)
test_matrix_collection = _batch_matrix_transpose(input_matrix_collection)
correct_matrix_collection = np.einsum("ijk->jik", input_matrix_collection)
assert_allclose(test_matrix_collection, correct_matrix_collection)
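# Hedged note (not part of the original tests): with the (dim, dim, blocksize)
# layout used throughout, the batched matrix-vector product above is the
# einsum contraction "ijk,jk->ik". Names are illustrative only.
import numpy as _onp
_A = _onp.random.randn(3, 3, 8)
_v = _onp.random.randn(3, 8)
_ref = _onp.einsum("ijk,jk->ik", _A, _v)
_loop = _onp.array([_A[..., i].dot(_v[..., i]) for i in range(8)]).T
_onp.testing.assert_allclose(_ref, _loop)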
| 32.909091
| 87
| 0.767035
| 826
| 6,516
| 5.567797
| 0.09201
| 0.292237
| 0.086975
| 0.141335
| 0.83105
| 0.814742
| 0.750381
| 0.737769
| 0.681452
| 0.64775
| 0
| 0.011009
| 0.149632
| 6,516
| 197
| 88
| 33.076142
| 0.818986
| 0.01105
| 0
| 0.345324
| 0
| 0
| 0.023263
| 0
| 0
| 0
| 0
| 0.005076
| 0.086331
| 1
| 0.079137
| false
| 0
| 0.028777
| 0
| 0.107914
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c8fcb1790d0ff00d8d423a19aacfa99859b902c
| 47
|
py
|
Python
|
library/src/test/unit/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 3
|
2019-11-01T04:51:51.000Z
|
2019-12-17T04:25:18.000Z
|
library/src/test/unit/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 1
|
2019-11-11T18:29:36.000Z
|
2019-11-11T18:29:36.000Z
|
library/src/test/unit/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 2
|
2019-12-18T11:49:00.000Z
|
2020-03-27T20:06:15.000Z
|
# Created by Xinyu Zhu on 10/22/2019, 12:45 PM
| 23.5
| 46
| 0.702128
| 11
| 47
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0.191489
| 47
| 1
| 47
| 47
| 0.552632
| 0.93617
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c978c789c087b1d53966c4f08815c356f697da2
| 2,277
|
py
|
Python
|
idl2py/jd/ydn2md.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
idl2py/jd/ydn2md.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
idl2py/jd/ydn2md.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
"""
By Dr Jie Zheng -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from ..util import *
def ydn2md():
pass
#;-------------------------------------------------------------
#;+
#; NAME:
#; YDN2MD
#; PURPOSE:
#; Convert from year and day number of year to month and day of month.
#; CALLING SEQUENCE:
#; YDN2MD,yr,dy,m,d
#; INPUTS:
#; yr = 4 digit year (like 1988), integer scalar
#; dy = day number in year (like 310), integer scalar or vector
#;
#; OUTPUTS:
#; m = month number (1-12, e.g. 11 = Nov)
#; d = day of month (like 5).
#; Note: On error returns m = d = -1.
#;
#; EXAMPLE:
#; Find the month/day of days 155 and 255 in the year 2001
#;
#; IDL> ydn2md, 2001, [155,255], m, d
#; ==> m = [6,9] & d = [4,12] ; = June 4 and September 12
#;
#; MODIFICATION HISTORY:
#; Adapted from Johns Hopkins University/Applied Physics Laboratory
#; Update to use VALUE_LOCATE, W. Landsman January 2001
#;-
#;-------------------------------------------------------------
#
# PRO YDN2MD,YR,DY,M,D, help=hlp
#
# IF (N_PARAMS() LT 4) or keyword_set(hlp) THEN BEGIN
# PRINT,' Convert from year and day number of year to month '+$
# 'and day of month.'
# PRINT,' ydn2md,yr,dy,m,d'
# PRINT,' yr = year (like 1988), scalar input'
# PRINT,' dy = day number in year (like 310), scalar or vector input'
# PRINT,' m = month number (like 11 = Nov). out'
# PRINT,' d = day of month (like 5). out'
# PRINT,' Note: On error returns m = d = -1.'
# RETURN
# ENDIF
#
# ; Days before start of each month.
# YDAYS = [0,31,59,90,120,151,181,212,243,273,304,334,366] + 1
#
# LEAP = (((YR MOD 4) EQ 0) AND ((YR MOD 100) NE 0)) OR $
# ((YR MOD 400) EQ 0)
#
# IF LEAP THEN YDAYS[2] = YDAYS[2:*] + 1
# M = VALUE_LOCATE(YDAYS, DY) + 1
# D = DY - YDAYS[M-1] + 1
# BAD = WHERE(M GT 12, NBAD)
#
# IF NBAD GT 0 THEN BEGIN
# M[BAD] = -1
# D[BAD] = -1
# MESSAGE,'Error in Day Number',/CON
# ENDIF
# RETURN
#
# END
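# Hedged Python translation of the IDL routine documented above (the stub
# ydn2md() is unimplemented in the original; this sketch follows the
# commented IDL logic and assumes a scalar 4-digit year and dy >= 1).
def ydn2md_py(yr, dy):
    """Convert a year and day-of-year to (month, day-of-month) arrays."""
    dy = np.atleast_1d(dy)
    # First day number of each month (days before the month, plus one).
    ydays = np.array([0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]) + 1
    leap = (yr % 4 == 0 and yr % 100 != 0) or yr % 400 == 0
    if leap:
        ydays[2:] = ydays[2:] + 1
    m = np.searchsorted(ydays, dy, side="right")   # VALUE_LOCATE(YDAYS, DY) + 1
    d = dy - ydays[m - 1] + 1
    bad = m > 12
    m[bad] = -1                                    # on error return m = d = -1
    d[bad] = -1
    return m, d
# e.g. ydn2md_py(2001, [155, 255]) -> (array([6, 9]), array([4, 12]))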
| 29.571429
| 85
| 0.474747
| 309
| 2,277
| 3.485437
| 0.430421
| 0.011142
| 0.03714
| 0.030641
| 0.245125
| 0.211699
| 0.181987
| 0.098422
| 0.098422
| 0.098422
| 0
| 0.086207
| 0.337725
| 2,277
| 76
| 86
| 29.960526
| 0.627984
| 0.857708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7ca98cd53abc3e14ed14daf80df06a78869bcbef
| 9,637
|
py
|
Python
|
python/seldon/luigi/spark.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 1,645
|
2015-02-13T12:31:44.000Z
|
2022-03-17T07:50:05.000Z
|
python/seldon/luigi/spark.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 57
|
2015-03-26T16:00:23.000Z
|
2021-05-10T11:03:40.000Z
|
python/seldon/luigi/spark.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 371
|
2015-03-16T11:04:16.000Z
|
2022-02-27T01:16:02.000Z
|
import luigi
from subprocess import call
import logging
from seldon.misc.item_similarity import *
from seldon.misc.most_popular import *
from luigi.contrib.spark import SparkSubmitTask
#
# Item Similarity
#
class ItemSimilaritySparkJob(luigi.Task):
"""
Spark job for running item similarity model
"""
inputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
sparkDriverMemory = luigi.Parameter(default="1g")
sparkExecutorMemory = luigi.Parameter(default="1g")
startDay = luigi.IntParameter(default=1)
days = luigi.IntParameter(default=1)
itemType = luigi.IntParameter(-1)
limit = luigi.IntParameter(default=100)
minItemsPerUser = luigi.IntParameter(default=0)
minUsersPerItem = luigi.IntParameter(default=0)
maxUsersPerItem = luigi.IntParameter(default=2000000)
dimsumThreshold = luigi.FloatParameter(default=0.1)
sample = luigi.FloatParameter(default=1.0)
def output(self):
return luigi.LocalTarget("{}/{}/item-similarity/{}".format(self.outputPath,self.client,self.startDay))
def run(self):
params = ["seldon-cli","model","--action","add","--client-name",self.client,"--model-name","similar-items","--inputPath",self.inputPath,"--outputPath",self.outputPath,"--startDay",str(self.startDay),"--days",str(self.days),"--sample",str(self.sample),"--itemType",str(self.itemType),"--limit",str(self.limit),"--minItemsPerUser",str(self.minItemsPerUser),"--minUsersPerItem",str(self.minUsersPerItem),"--maxUsersPerItem",str(self.maxUsersPerItem),"--dimsumThreshold",str(self.dimsumThreshold)]
res = call(params)
params = ["seldon-cli","model","--action","train","--client-name",self.client,"--model-name","similar-items","--spark-executor-memory",self.sparkExecutorMemory,"--spark-driver-memory",self.sparkDriverMemory]
res = call(params)
return res
class SeldonItemSimilarity(luigi.Task):
"""
Item similarity model. Depends on the Spark job above and writes its results to the MySQL db.
"""
startDay = luigi.IntParameter(default=1)
client = luigi.Parameter(default="test")
db_host = luigi.Parameter(default="mysql")
db_user = luigi.Parameter(default="root")
db_pass = luigi.Parameter(default="mypass")
def requires(self):
return ItemSimilaritySparkJob(client=self.client,startDay=self.startDay)
def run(self):
u = ItemSimilarityUploadMysql(self.client,self.db_host,self.db_user,self.db_pass)
u.stream_and_upload(self.input().path)
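# Hedged usage sketch (paths and parameter values illustrative): luigi tasks
# like the one above are typically launched from the command line, e.g.
#
#   luigi --module seldon.luigi.spark SeldonItemSimilarity \
#         --client test --startDay 17000 \
#         --db-host mysql --db-user root --db-pass mypass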
#
# MF
#
class SeldonMatrixFactorization(luigi.Task):
"""
Matrix factorization using Spark
"""
inputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
sparkDriverMemory = luigi.Parameter(default="1g")
sparkExecutorMemory = luigi.Parameter(default="1g")
startDay = luigi.IntParameter(default=1)
days = luigi.IntParameter(default=1)
rank = luigi.IntParameter(default=30)
mf_lambda = luigi.FloatParameter(default=0.01)
alpha = luigi.FloatParameter(default=1)
iterations = luigi.IntParameter(default=5)
def output(self):
return luigi.LocalTarget("{}/{}/matrix-factorization/{}".format(self.outputPath,self.client,self.startDay))
def run(self):
params = ["seldon-cli","model","--action","add","--client-name",self.client,"--model-name","matrix-factorization","--inputPath",self.inputPath,"--outputPath",self.outputPath,"--startDay",str(self.startDay),"--days",str(self.days),"--rank",str(self.rank),"--lambda",str(self.mf_lambda),"--alpha",str(self.alpha),"--iterations",str(self.iterations)]
res = call(params)
params = ["seldon-cli","model","--action","train","--client-name",self.client,"--model-name","matrix-factorization","--spark-executor-memory",self.sparkExecutorMemory,"--spark-driver-memory",self.sparkDriverMemory]
res = call(params)
return res
class SeldonMatrixFactorizationClusters(luigi.Task):
"""
User Clustered Matrix factorization using Spark
"""
inputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
sparkDriverMemory = luigi.Parameter(default="1g")
sparkExecutorMemory = luigi.Parameter(default="1g")
startDay = luigi.IntParameter(default=1)
days = luigi.IntParameter(default=1)
rank = luigi.IntParameter(default=30)
mf_lambda = luigi.FloatParameter(default=0.01)
alpha = luigi.FloatParameter(default=1)
iterations = luigi.IntParameter(default=5)
def output(self):
return luigi.LocalTarget("{}/{}/matrix-factorization-clusters/{}".format(self.outputPath,self.client,self.startDay))
def run(self):
params = ["seldon-cli","model","--action","add","--client-name",self.client,"--model-name","matrix-factorization-clusters","--inputPath",self.inputPath,"--outputPath",self.outputPath,"--startDay",str(self.startDay),"--days",str(self.days),"--rank",str(self.rank),"--lambda",str(self.mf_lambda),"--alpha",str(self.alpha),"--iterations",str(self.iterations)]
res = call(params)
params = ["seldon-cli","model","--action","train","--client-name",self.client,"--model-name","matrix-factorization-clusters","--spark-executor-memory",self.sparkExecutorMemory,"--spark-driver-memory",self.sparkDriverMemory]
res = call(params)
return res
class SeldonMostPopularDim(luigi.Task):
"""
Most Popular by Dimension using Spark
"""
inputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
sparkDriverMemory = luigi.Parameter(default="1g")
sparkExecutorMemory = luigi.Parameter(default="1g")
startDay = luigi.IntParameter(default=1)
days = luigi.IntParameter(default=1)
k = luigi.IntParameter(default=28)
db_host = luigi.Parameter(default="mysql")
db_port = luigi.IntParameter(default=3306)
db_user = luigi.Parameter(default="root")
db_pass = luigi.Parameter(default="mypass")
def output(self):
return luigi.LocalTarget("{}/{}/mostpopulardim/{}".format(self.outputPath,self.client,self.startDay))
def run(self):
jdbc = "jdbc:mysql://"+self.db_host+":"+str(self.db_port)+"/"+self.client+"?characterEncoding=utf8&user="+self.db_user+"&password="+self.db_pass
params = ["seldon-cli","model","--action","add","--client-name",self.client,"--model-name","mostpopulardim","--inputPath",self.inputPath,"--outputPath",self.outputPath,"--startDay",str(self.startDay),"--days",str(self.days),"--jdbc",jdbc,"--k",str(self.k)]
res = call(params)
params = ["seldon-cli","model","--action","train","--client-name",self.client,"--model-name","mostpopulardim","--spark-executor-memory",self.sparkExecutorMemory,"--spark-driver-memory",self.sparkDriverMemory]
res = call(params)
return res
class MostPopularSparkJob(luigi.Task):
"""
Most Popular using Spark
"""
inputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
sparkDriverMemory = luigi.Parameter(default="1g")
sparkExecutorMemory = luigi.Parameter(default="1g")
startDay = luigi.IntParameter(default=1)
days = luigi.IntParameter(default=1)
def output(self):
return luigi.LocalTarget("{}/{}/mostpopular/{}".format(self.outputPath,self.client,self.startDay))
def run(self):
params = ["seldon-cli","model","--action","add","--client-name",self.client,"--model-name","mostpopular","--inputPath",self.inputPath,"--outputPath",self.outputPath,"--startDay",str(self.startDay),"--days",str(self.days)]
res = call(params)
params = ["seldon-cli","model","--action","train","--client-name",self.client,"--model-name","mostpopular","--spark-executor-memory",self.sparkExecutorMemory,"--spark-driver-memory",self.sparkDriverMemory]
res = call(params)
return res
class SeldonMostPopular(luigi.Task):
"""
Most popular model. Depends on the Spark job; writes results to the MySQL db.
"""
startDay = luigi.IntParameter(default=1)
client = luigi.Parameter(default="test")
db_host = luigi.Parameter(default="mysql")
db_user = luigi.Parameter(default="root")
db_pass = luigi.Parameter(default="mypass")
def requires(self):
return MostPopularSparkJob(client=self.client,startDay=self.startDay)
def run(self):
u = MostPopularUploadMysql(self.client,self.db_host,self.db_user,self.db_pass)
u.stream_and_upload(self.input().path)
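
# A minimal usage sketch (assumed; not part of the original file): tasks such
# as SeldonItemSimilarity and SeldonMostPopular are normally launched through
# the standard luigi runner. The module name `seldon_luigi` below is
# hypothetical; substitute the module that actually contains these task
# classes.
#
#   luigi --module seldon_luigi SeldonMostPopular \
#         --client test --startDay 17278 \
#         --db-host mysql --db-user root --db-pass mypass
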
class SeldonSparkJob(SparkSubmitTask):
"""
Template for running a Spark Job
"""
app = "/home/seldon/libs/seldon-spark.jar"
entry_class = "io.seldon.spark.mllib.SimilarItems"
master = "spark://spark-master:7077"
outputPath = luigi.Parameter(default="/seldon-data/seldon-models/")
client = luigi.Parameter(default="test")
startDay = luigi.IntParameter(default=17278)
def app_options(self):
return ["--client",self.client,"--zookeeper","zookeeper-1"]
def output(self):
return luigi.LocalTarget("{}/{}/item-similarity/{}".format(self.outputPath,self.client,self.startDay))

==== models/__init__.py | repo: MarcAntoineAlex/DenseNAS-1 | hexsha: 6b04133a33c09306861fff8d4d2c162bef9235e7 | lang: Python | size: 26 bytes | license: Apache-2.0 | stars: 107 ====

from .wideresnet import *

==== userSetup.py | repo: Mikfr83/crab | hexsha: 6b0e8deb398b8111d8e1ea99866340b05f37ea1d | lang: Python | size: 79 bytes | license: MIT ====

import maya.cmds
maya.cmds.evalDeferred('import crab;crab.menu.initialize()')
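
# Note (added commentary): Maya executes userSetup.py automatically at startup,
# and evalDeferred postpones the menu construction until Maya's UI has finished
# initializing; importing and calling crab.menu.initialize() immediately could
# run before the UI exists.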

==== cmstack/cmlang/antlr_generator/lexer.py | repo: he-actlab/cdstack | hexsha: 6b1af921694d4f3da65562cf4b2c89a44fc320a7 | lang: Python | size: 22,101 bytes | license: Apache-2.0 ====

# Generated from /home/kinzers/projects/cmstack.code/cmstack/cmlang/antlr_generator/CMLang.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2C")
buf.write("\u0217\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6")
buf.write("\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r")
buf.write("\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24")
buf.write("\3\24\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34")
buf.write("\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37")
buf.write("\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3#\3#\3$\3$\3$\3$\3$\3")
buf.write("$\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3&\3&\3\'")
buf.write("\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3+\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3/\3/\3/\3\60\3\60\3\60\3\61\3\61")
buf.write("\3\61\3\62\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\66\3\66\7\66")
buf.write("\u0180\n\66\f\66\16\66\u0183\13\66\3\66\3\66\3\67\3\67")
buf.write("\3\67\7\67\u018a\n\67\f\67\16\67\u018d\13\67\38\38\78")
buf.write("\u0191\n8\f8\168\u0194\138\38\68\u0197\n8\r8\168\u0198")
buf.write("\58\u019b\n8\39\39\39\69\u01a0\n9\r9\169\u01a1\3:\3:\3")
buf.write(":\6:\u01a7\n:\r:\16:\u01a8\3;\3;\3;\6;\u01ae\n;\r;\16")
buf.write(";\u01af\3<\3<\5<\u01b4\n<\3<\3<\3=\3=\5=\u01ba\n=\3>\3")
buf.write(">\3?\6?\u01bf\n?\r?\16?\u01c0\3?\3?\3@\3@\5@\u01c7\n@")
buf.write("\3@\5@\u01ca\n@\3@\3@\3A\3A\3A\3A\7A\u01d2\nA\fA\16A\u01d5")
buf.write("\13A\3A\3A\3A\3A\3A\3B\3B\3B\3B\7B\u01e0\nB\fB\16B\u01e3")
buf.write("\13B\3B\3B\3C\3C\3D\3D\3E\3E\3F\3F\3G\3G\3H\3H\3I\5I\u01f4")
buf.write("\nI\3I\3I\3I\3I\5I\u01fa\nI\3J\3J\5J\u01fe\nJ\3J\3J\3")
buf.write("K\6K\u0203\nK\rK\16K\u0204\3L\3L\6L\u0209\nL\rL\16L\u020a")
buf.write("\3M\3M\5M\u020f\nM\3M\6M\u0212\nM\rM\16M\u0213\3N\3N\4")
buf.write("\u0181\u01d3\2O\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23")
buf.write("\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25")
buf.write(")\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A")
buf.write("\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65")
buf.write("i\66k\67m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085\2")
buf.write("\u0087\2\u0089\2\u008b\2\u008d\2\u008f\2\u0091\2\u0093")
buf.write("\2\u0095\2\u0097\2\u0099\2\u009b\2\3\2\17\4\2QQqq\4\2")
buf.write("ZZzz\4\2DDdd\4\2\13\13\"\"\4\2\f\f\17\17\5\2C\\aac|\3")
buf.write("\2\63;\3\2\62;\3\2\629\5\2\62;CHch\3\2\62\63\4\2GGgg\4")
buf.write("\2--//\2\u0221\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t")
buf.write("\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3")
buf.write("\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2")
buf.write("\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2")
buf.write("\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2")
buf.write("\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65")
buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2")
buf.write("\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2")
buf.write("\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2")
buf.write("\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3")
buf.write("\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e")
buf.write("\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2")
buf.write("o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2")
buf.write("\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081")
buf.write("\3\2\2\2\2\u0083\3\2\2\2\3\u009d\3\2\2\2\5\u009f\3\2\2")
buf.write("\2\7\u00a1\3\2\2\2\t\u00a3\3\2\2\2\13\u00a5\3\2\2\2\r")
buf.write("\u00a7\3\2\2\2\17\u00a9\3\2\2\2\21\u00ab\3\2\2\2\23\u00ad")
buf.write("\3\2\2\2\25\u00af\3\2\2\2\27\u00b1\3\2\2\2\31\u00b3\3")
buf.write("\2\2\2\33\u00b5\3\2\2\2\35\u00b7\3\2\2\2\37\u00b9\3\2")
buf.write("\2\2!\u00bb\3\2\2\2#\u00bd\3\2\2\2%\u00c1\3\2\2\2\'\u00c7")
buf.write("\3\2\2\2)\u00cb\3\2\2\2+\u00d0\3\2\2\2-\u00d8\3\2\2\2")
buf.write("/\u00de\3\2\2\2\61\u00e5\3\2\2\2\63\u00eb\3\2\2\2\65\u00f1")
buf.write("\3\2\2\2\67\u00f8\3\2\2\29\u0102\3\2\2\2;\u010c\3\2\2")
buf.write("\2=\u0112\3\2\2\2?\u0117\3\2\2\2A\u011a\3\2\2\2C\u011d")
buf.write("\3\2\2\2E\u0120\3\2\2\2G\u0122\3\2\2\2I\u0128\3\2\2\2")
buf.write("K\u012f\3\2\2\2M\u0138\3\2\2\2O\u013c\3\2\2\2Q\u0142\3")
buf.write("\2\2\2S\u0146\3\2\2\2U\u014d\3\2\2\2W\u0150\3\2\2\2Y\u0156")
buf.write("\3\2\2\2[\u015b\3\2\2\2]\u0162\3\2\2\2_\u0165\3\2\2\2")
buf.write("a\u0168\3\2\2\2c\u016b\3\2\2\2e\u016e\3\2\2\2g\u0178\3")
buf.write("\2\2\2i\u017b\3\2\2\2k\u017d\3\2\2\2m\u0186\3\2\2\2o\u019a")
buf.write("\3\2\2\2q\u019c\3\2\2\2s\u01a3\3\2\2\2u\u01aa\3\2\2\2")
buf.write("w\u01b3\3\2\2\2y\u01b9\3\2\2\2{\u01bb\3\2\2\2}\u01be\3")
buf.write("\2\2\2\177\u01c9\3\2\2\2\u0081\u01cd\3\2\2\2\u0083\u01db")
buf.write("\3\2\2\2\u0085\u01e6\3\2\2\2\u0087\u01e8\3\2\2\2\u0089")
buf.write("\u01ea\3\2\2\2\u008b\u01ec\3\2\2\2\u008d\u01ee\3\2\2\2")
buf.write("\u008f\u01f0\3\2\2\2\u0091\u01f9\3\2\2\2\u0093\u01fd\3")
buf.write("\2\2\2\u0095\u0202\3\2\2\2\u0097\u0206\3\2\2\2\u0099\u020c")
buf.write("\3\2\2\2\u009b\u0215\3\2\2\2\u009d\u009e\7*\2\2\u009e")
buf.write("\4\3\2\2\2\u009f\u00a0\7+\2\2\u00a0\6\3\2\2\2\u00a1\u00a2")
buf.write("\7}\2\2\u00a2\b\3\2\2\2\u00a3\u00a4\7\177\2\2\u00a4\n")
buf.write("\3\2\2\2\u00a5\u00a6\7.\2\2\u00a6\f\3\2\2\2\u00a7\u00a8")
buf.write("\7]\2\2\u00a8\16\3\2\2\2\u00a9\u00aa\7_\2\2\u00aa\20\3")
buf.write("\2\2\2\u00ab\u00ac\7<\2\2\u00ac\22\3\2\2\2\u00ad\u00ae")
buf.write("\7-\2\2\u00ae\24\3\2\2\2\u00af\u00b0\7/\2\2\u00b0\26\3")
buf.write("\2\2\2\u00b1\u00b2\7,\2\2\u00b2\30\3\2\2\2\u00b3\u00b4")
buf.write("\7\61\2\2\u00b4\32\3\2\2\2\u00b5\u00b6\7\'\2\2\u00b6\34")
buf.write("\3\2\2\2\u00b7\u00b8\7>\2\2\u00b8\36\3\2\2\2\u00b9\u00ba")
buf.write("\7@\2\2\u00ba \3\2\2\2\u00bb\u00bc\7A\2\2\u00bc\"\3\2")
buf.write("\2\2\u00bd\u00be\7k\2\2\u00be\u00bf\7p\2\2\u00bf\u00c0")
buf.write("\7v\2\2\u00c0$\3\2\2\2\u00c1\u00c2\7h\2\2\u00c2\u00c3")
buf.write("\7n\2\2\u00c3\u00c4\7q\2\2\u00c4\u00c5\7c\2\2\u00c5\u00c6")
buf.write("\7v\2\2\u00c6&\3\2\2\2\u00c7\u00c8\7u\2\2\u00c8\u00c9")
buf.write("\7v\2\2\u00c9\u00ca\7t\2\2\u00ca(\3\2\2\2\u00cb\u00cc")
buf.write("\7d\2\2\u00cc\u00cd\7q\2\2\u00cd\u00ce\7q\2\2\u00ce\u00cf")
buf.write("\7n\2\2\u00cf*\3\2\2\2\u00d0\u00d1\7e\2\2\u00d1\u00d2")
buf.write("\7q\2\2\u00d2\u00d3\7o\2\2\u00d3\u00d4\7r\2\2\u00d4\u00d5")
buf.write("\7n\2\2\u00d5\u00d6\7g\2\2\u00d6\u00d7\7z\2\2\u00d7,\3")
buf.write("\2\2\2\u00d8\u00d9\7k\2\2\u00d9\u00da\7p\2\2\u00da\u00db")
buf.write("\7r\2\2\u00db\u00dc\7w\2\2\u00dc\u00dd\7v\2\2\u00dd.\3")
buf.write("\2\2\2\u00de\u00df\7q\2\2\u00df\u00e0\7w\2\2\u00e0\u00e1")
buf.write("\7v\2\2\u00e1\u00e2\7r\2\2\u00e2\u00e3\7w\2\2\u00e3\u00e4")
buf.write("\7v\2\2\u00e4\60\3\2\2\2\u00e5\u00e6\7u\2\2\u00e6\u00e7")
buf.write("\7v\2\2\u00e7\u00e8\7c\2\2\u00e8\u00e9\7v\2\2\u00e9\u00ea")
buf.write("\7g\2\2\u00ea\62\3\2\2\2\u00eb\u00ec\7r\2\2\u00ec\u00ed")
buf.write("\7c\2\2\u00ed\u00ee\7t\2\2\u00ee\u00ef\7c\2\2\u00ef\u00f0")
buf.write("\7o\2\2\u00f0\64\3\2\2\2\u00f1\u00f2\7u\2\2\u00f2\u00f3")
buf.write("\7r\2\2\u00f3\u00f4\7t\2\2\u00f4\u00f5\7k\2\2\u00f5\u00f6")
buf.write("\7p\2\2\u00f6\u00f7\7i\2\2\u00f7\66\3\2\2\2\u00f8\u00f9")
buf.write("\7t\2\2\u00f9\u00fa\7g\2\2\u00fa\u00fb\7u\2\2\u00fb\u00fc")
buf.write("\7g\2\2\u00fc\u00fd\7t\2\2\u00fd\u00fe\7x\2\2\u00fe\u00ff")
buf.write("\7q\2\2\u00ff\u0100\7k\2\2\u0100\u0101\7t\2\2\u01018\3")
buf.write("\2\2\2\u0102\u0103\7e\2\2\u0103\u0104\7q\2\2\u0104\u0105")
buf.write("\7o\2\2\u0105\u0106\7r\2\2\u0106\u0107\7q\2\2\u0107\u0108")
buf.write("\7p\2\2\u0108\u0109\7g\2\2\u0109\u010a\7p\2\2\u010a\u010b")
buf.write("\7v\2\2\u010b:\3\2\2\2\u010c\u010d\7k\2\2\u010d\u010e")
buf.write("\7p\2\2\u010e\u010f\7f\2\2\u010f\u0110\7g\2\2\u0110\u0111")
buf.write("\7z\2\2\u0111<\3\2\2\2\u0112\u0113\7h\2\2\u0113\u0114")
buf.write("\7n\2\2\u0114\u0115\7q\2\2\u0115\u0116\7y\2\2\u0116>\3")
buf.write("\2\2\2\u0117\u0118\7\60\2\2\u0118\u0119\7,\2\2\u0119@")
buf.write("\3\2\2\2\u011a\u011b\7\60\2\2\u011b\u011c\7^\2\2\u011c")
buf.write("B\3\2\2\2\u011d\u011e\7\60\2\2\u011e\u011f\7\61\2\2\u011f")
buf.write("D\3\2\2\2\u0120\u0121\7`\2\2\u0121F\3\2\2\2\u0122\u0123")
buf.write("\7d\2\2\u0123\u0124\7t\2\2\u0124\u0125\7g\2\2\u0125\u0126")
buf.write("\7c\2\2\u0126\u0127\7m\2\2\u0127H\3\2\2\2\u0128\u0129")
buf.write("\7t\2\2\u0129\u012a\7g\2\2\u012a\u012b\7v\2\2\u012b\u012c")
buf.write("\7w\2\2\u012c\u012d\7t\2\2\u012d\u012e\7p\2\2\u012eJ\3")
buf.write("\2\2\2\u012f\u0130\7h\2\2\u0130\u0131\7w\2\2\u0131\u0132")
buf.write("\7p\2\2\u0132\u0133\7e\2\2\u0133\u0134\7v\2\2\u0134\u0135")
buf.write("\7k\2\2\u0135\u0136\7q\2\2\u0136\u0137\7p\2\2\u0137L\3")
buf.write("\2\2\2\u0138\u0139\7h\2\2\u0139\u013a\7q\2\2\u013a\u013b")
buf.write("\7t\2\2\u013bN\3\2\2\2\u013c\u013d\7y\2\2\u013d\u013e")
buf.write("\7j\2\2\u013e\u013f\7k\2\2\u013f\u0140\7n\2\2\u0140\u0141")
buf.write("\7g\2\2\u0141P\3\2\2\2\u0142\u0143\7g\2\2\u0143\u0144")
buf.write("\7p\2\2\u0144\u0145\7f\2\2\u0145R\3\2\2\2\u0146\u0147")
buf.write("\7i\2\2\u0147\u0148\7n\2\2\u0148\u0149\7q\2\2\u0149\u014a")
buf.write("\7d\2\2\u014a\u014b\7c\2\2\u014b\u014c\7n\2\2\u014cT\3")
buf.write("\2\2\2\u014d\u014e\7k\2\2\u014e\u014f\7h\2\2\u014fV\3")
buf.write("\2\2\2\u0150\u0151\7e\2\2\u0151\u0152\7n\2\2\u0152\u0153")
buf.write("\7g\2\2\u0153\u0154\7c\2\2\u0154\u0155\7t\2\2\u0155X\3")
buf.write("\2\2\2\u0156\u0157\7g\2\2\u0157\u0158\7n\2\2\u0158\u0159")
buf.write("\7u\2\2\u0159\u015a\7g\2\2\u015aZ\3\2\2\2\u015b\u015c")
buf.write("\7g\2\2\u015c\u015d\7n\2\2\u015d\u015e\7u\2\2\u015e\u015f")
buf.write("\7g\2\2\u015f\u0160\7k\2\2\u0160\u0161\7h\2\2\u0161\\")
buf.write("\3\2\2\2\u0162\u0163\7>\2\2\u0163\u0164\7?\2\2\u0164^")
buf.write("\3\2\2\2\u0165\u0166\7@\2\2\u0166\u0167\7?\2\2\u0167`")
buf.write("\3\2\2\2\u0168\u0169\7?\2\2\u0169\u016a\7?\2\2\u016ab")
buf.write("\3\2\2\2\u016b\u016c\7#\2\2\u016c\u016d\7?\2\2\u016dd")
buf.write("\3\2\2\2\u016e\u016f\7v\2\2\u016f\u0170\7t\2\2\u0170\u0171")
buf.write("\7c\2\2\u0171\u0172\7p\2\2\u0172\u0173\7u\2\2\u0173\u0174")
buf.write("\7r\2\2\u0174\u0175\7q\2\2\u0175\u0176\7u\2\2\u0176\u0177")
buf.write("\7g\2\2\u0177f\3\2\2\2\u0178\u0179\7\60\2\2\u0179\u017a")
buf.write("\7)\2\2\u017ah\3\2\2\2\u017b\u017c\7=\2\2\u017cj\3\2\2")
buf.write("\2\u017d\u0181\7$\2\2\u017e\u0180\13\2\2\2\u017f\u017e")
buf.write("\3\2\2\2\u0180\u0183\3\2\2\2\u0181\u0182\3\2\2\2\u0181")
buf.write("\u017f\3\2\2\2\u0182\u0184\3\2\2\2\u0183\u0181\3\2\2\2")
buf.write("\u0184\u0185\7$\2\2\u0185l\3\2\2\2\u0186\u018b\5\u0085")
buf.write("C\2\u0187\u018a\5\u0085C\2\u0188\u018a\5\u0089E\2\u0189")
buf.write("\u0187\3\2\2\2\u0189\u0188\3\2\2\2\u018a\u018d\3\2\2\2")
buf.write("\u018b\u0189\3\2\2\2\u018b\u018c\3\2\2\2\u018cn\3\2\2")
buf.write("\2\u018d\u018b\3\2\2\2\u018e\u0192\5\u0087D\2\u018f\u0191")
buf.write("\5\u0089E\2\u0190\u018f\3\2\2\2\u0191\u0194\3\2\2\2\u0192")
buf.write("\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u019b\3\2\2\2")
buf.write("\u0194\u0192\3\2\2\2\u0195\u0197\7\62\2\2\u0196\u0195")
buf.write("\3\2\2\2\u0197\u0198\3\2\2\2\u0198\u0196\3\2\2\2\u0198")
buf.write("\u0199\3\2\2\2\u0199\u019b\3\2\2\2\u019a\u018e\3\2\2\2")
buf.write("\u019a\u0196\3\2\2\2\u019bp\3\2\2\2\u019c\u019d\7\62\2")
buf.write("\2\u019d\u019f\t\2\2\2\u019e\u01a0\5\u008bF\2\u019f\u019e")
buf.write("\3\2\2\2\u01a0\u01a1\3\2\2\2\u01a1\u019f\3\2\2\2\u01a1")
buf.write("\u01a2\3\2\2\2\u01a2r\3\2\2\2\u01a3\u01a4\7\62\2\2\u01a4")
buf.write("\u01a6\t\3\2\2\u01a5\u01a7\5\u008dG\2\u01a6\u01a5\3\2")
buf.write("\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a8\u01a9")
buf.write("\3\2\2\2\u01a9t\3\2\2\2\u01aa\u01ab\7\62\2\2\u01ab\u01ad")
buf.write("\t\4\2\2\u01ac\u01ae\5\u008fH\2\u01ad\u01ac\3\2\2\2\u01ae")
buf.write("\u01af\3\2\2\2\u01af\u01ad\3\2\2\2\u01af\u01b0\3\2\2\2")
buf.write("\u01b0v\3\2\2\2\u01b1\u01b4\5y=\2\u01b2\u01b4\5\u0095")
buf.write("K\2\u01b3\u01b1\3\2\2\2\u01b3\u01b2\3\2\2\2\u01b4\u01b5")
buf.write("\3\2\2\2\u01b5\u01b6\7k\2\2\u01b6x\3\2\2\2\u01b7\u01ba")
buf.write("\5\u0091I\2\u01b8\u01ba\5\u0093J\2\u01b9\u01b7\3\2\2\2")
buf.write("\u01b9\u01b8\3\2\2\2\u01baz\3\2\2\2\u01bb\u01bc\7?\2\2")
buf.write("\u01bc|\3\2\2\2\u01bd\u01bf\t\5\2\2\u01be\u01bd\3\2\2")
buf.write("\2\u01bf\u01c0\3\2\2\2\u01c0\u01be\3\2\2\2\u01c0\u01c1")
buf.write("\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2\u01c3\b?\2\2\u01c3")
buf.write("~\3\2\2\2\u01c4\u01c6\7\17\2\2\u01c5\u01c7\7\f\2\2\u01c6")
buf.write("\u01c5\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7\u01ca\3\2\2\2")
buf.write("\u01c8\u01ca\7\f\2\2\u01c9\u01c4\3\2\2\2\u01c9\u01c8\3")
buf.write("\2\2\2\u01ca\u01cb\3\2\2\2\u01cb\u01cc\b@\2\2\u01cc\u0080")
buf.write("\3\2\2\2\u01cd\u01ce\7\61\2\2\u01ce\u01cf\7,\2\2\u01cf")
buf.write("\u01d3\3\2\2\2\u01d0\u01d2\13\2\2\2\u01d1\u01d0\3\2\2")
buf.write("\2\u01d2\u01d5\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d3\u01d1")
buf.write("\3\2\2\2\u01d4\u01d6\3\2\2\2\u01d5\u01d3\3\2\2\2\u01d6")
buf.write("\u01d7\7,\2\2\u01d7\u01d8\7\61\2\2\u01d8\u01d9\3\2\2\2")
buf.write("\u01d9\u01da\bA\2\2\u01da\u0082\3\2\2\2\u01db\u01dc\7")
buf.write("\61\2\2\u01dc\u01dd\7\61\2\2\u01dd\u01e1\3\2\2\2\u01de")
buf.write("\u01e0\n\6\2\2\u01df\u01de\3\2\2\2\u01e0\u01e3\3\2\2\2")
buf.write("\u01e1\u01df\3\2\2\2\u01e1\u01e2\3\2\2\2\u01e2\u01e4\3")
buf.write("\2\2\2\u01e3\u01e1\3\2\2\2\u01e4\u01e5\bB\2\2\u01e5\u0084")
buf.write("\3\2\2\2\u01e6\u01e7\t\7\2\2\u01e7\u0086\3\2\2\2\u01e8")
buf.write("\u01e9\t\b\2\2\u01e9\u0088\3\2\2\2\u01ea\u01eb\t\t\2\2")
buf.write("\u01eb\u008a\3\2\2\2\u01ec\u01ed\t\n\2\2\u01ed\u008c\3")
buf.write("\2\2\2\u01ee\u01ef\t\13\2\2\u01ef\u008e\3\2\2\2\u01f0")
buf.write("\u01f1\t\f\2\2\u01f1\u0090\3\2\2\2\u01f2\u01f4\5\u0095")
buf.write("K\2\u01f3\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f5")
buf.write("\3\2\2\2\u01f5\u01fa\5\u0097L\2\u01f6\u01f7\5\u0095K\2")
buf.write("\u01f7\u01f8\7\60\2\2\u01f8\u01fa\3\2\2\2\u01f9\u01f3")
buf.write("\3\2\2\2\u01f9\u01f6\3\2\2\2\u01fa\u0092\3\2\2\2\u01fb")
buf.write("\u01fe\5\u0095K\2\u01fc\u01fe\5\u0091I\2\u01fd\u01fb\3")
buf.write("\2\2\2\u01fd\u01fc\3\2\2\2\u01fe\u01ff\3\2\2\2\u01ff\u0200")
buf.write("\5\u0099M\2\u0200\u0094\3\2\2\2\u0201\u0203\5\u0089E\2")
buf.write("\u0202\u0201\3\2\2\2\u0203\u0204\3\2\2\2\u0204\u0202\3")
buf.write("\2\2\2\u0204\u0205\3\2\2\2\u0205\u0096\3\2\2\2\u0206\u0208")
buf.write("\7\60\2\2\u0207\u0209\5\u0089E\2\u0208\u0207\3\2\2\2\u0209")
buf.write("\u020a\3\2\2\2\u020a\u0208\3\2\2\2\u020a\u020b\3\2\2\2")
buf.write("\u020b\u0098\3\2\2\2\u020c\u020e\t\r\2\2\u020d\u020f\t")
buf.write("\16\2\2\u020e\u020d\3\2\2\2\u020e\u020f\3\2\2\2\u020f")
buf.write("\u0211\3\2\2\2\u0210\u0212\5\u0089E\2\u0211\u0210\3\2")
buf.write("\2\2\u0212\u0213\3\2\2\2\u0213\u0211\3\2\2\2\u0213\u0214")
buf.write("\3\2\2\2\u0214\u009a\3\2\2\2\u0215\u0216\t\16\2\2\u0216")
buf.write("\u009c\3\2\2\2\32\2\u0181\u0189\u018b\u0192\u0198\u019a")
buf.write("\u01a1\u01a8\u01af\u01b3\u01b9\u01c0\u01c6\u01c9\u01d3")
buf.write("\u01e1\u01f3\u01f9\u01fd\u0204\u020a\u020e\u0213\3\b\2")
buf.write("\2")
return buf.getvalue()
class CMLangLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
INPUT = 22
OUTPUT = 23
STATE = 24
PARAMETER = 25
SPRING = 26
RESERVOIR = 27
COMPONENT = 28
INDEX = 29
FLOW = 30
ARRAYMUL = 31
ARRAYDIV = 32
ARRAYRDIV = 33
POW = 34
BREAK = 35
RETURN = 36
FUNCTION = 37
FOR = 38
WHILE = 39
END = 40
GLOBAL = 41
IF = 42
CLEAR = 43
ELSE = 44
ELSEIF = 45
LE_OP = 46
GE_OP = 47
EQ_OP = 48
NE_OP = 49
TRANSPOSE = 50
NCTRANSPOSE = 51
SEMI = 52
STRING_LITERAL = 53
IDENTIFIER = 54
DECIMAL_INTEGER = 55
OCT_INTEGER = 56
HEX_INTEGER = 57
BIN_INTEGER = 58
IMAG_NUMBER = 59
FLOAT_NUMBER = 60
EQ = 61
WHITESPACE = 62
NEWLINE = 63
BLOCKCOMMENT = 64
LINECOMMENT = 65
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'('", "')'", "'{'", "'}'", "','", "'['", "']'", "':'", "'+'",
"'-'", "'*'", "'/'", "'%'", "'<'", "'>'", "'?'", "'int'", "'float'",
"'str'", "'bool'", "'complex'", "'input'", "'output'", "'state'",
"'param'", "'spring'", "'reservoir'", "'component'", "'index'",
"'flow'", "'.*'", "'.\\'", "'./'", "'^'", "'break'", "'return'",
"'function'", "'for'", "'while'", "'end'", "'global'", "'if'",
"'clear'", "'else'", "'elseif'", "'<='", "'>='", "'=='", "'!='",
"'transpose'", "'.''", "';'", "'='" ]
symbolicNames = [ "<INVALID>",
"INPUT", "OUTPUT", "STATE", "PARAMETER", "SPRING", "RESERVOIR",
"COMPONENT", "INDEX", "FLOW", "ARRAYMUL", "ARRAYDIV", "ARRAYRDIV",
"POW", "BREAK", "RETURN", "FUNCTION", "FOR", "WHILE", "END",
"GLOBAL", "IF", "CLEAR", "ELSE", "ELSEIF", "LE_OP", "GE_OP",
"EQ_OP", "NE_OP", "TRANSPOSE", "NCTRANSPOSE", "SEMI", "STRING_LITERAL",
"IDENTIFIER", "DECIMAL_INTEGER", "OCT_INTEGER", "HEX_INTEGER",
"BIN_INTEGER", "IMAG_NUMBER", "FLOAT_NUMBER", "EQ", "WHITESPACE",
"NEWLINE", "BLOCKCOMMENT", "LINECOMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "INPUT", "OUTPUT", "STATE", "PARAMETER", "SPRING",
"RESERVOIR", "COMPONENT", "INDEX", "FLOW", "ARRAYMUL",
"ARRAYDIV", "ARRAYRDIV", "POW", "BREAK", "RETURN", "FUNCTION",
"FOR", "WHILE", "END", "GLOBAL", "IF", "CLEAR", "ELSE",
"ELSEIF", "LE_OP", "GE_OP", "EQ_OP", "NE_OP", "TRANSPOSE",
"NCTRANSPOSE", "SEMI", "STRING_LITERAL", "IDENTIFIER",
"DECIMAL_INTEGER", "OCT_INTEGER", "HEX_INTEGER", "BIN_INTEGER",
"IMAG_NUMBER", "FLOAT_NUMBER", "EQ", "WHITESPACE", "NEWLINE",
"BLOCKCOMMENT", "LINECOMMENT", "NONDIGIT", "NON_ZERO_DIGIT",
"DIGIT", "OCT_DIGIT", "HEX_DIGIT", "BIN_DIGIT", "POINT_FLOAT",
"EXPONENT_FLOAT", "INT_PART", "FRACTION", "EXPONENT",
"SIGN" ]
grammarFileName = "CMLang.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
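
# A minimal usage sketch (assumed; not part of the generated file): the lexer
# can be driven with the standard antlr4 runtime classes. The input string is
# illustrative only.
#
#   from antlr4 import InputStream, CommonTokenStream
#   lexer = CMLangLexer(InputStream("input int x;"))
#   tokens = CommonTokenStream(lexer)
#   tokens.fill()
#   print([t.text for t in tokens.tokens])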

==== api/models.py | repo: zpx01/react-movie-rating | hexsha: 8628c79257b2a142662b9fcc9f2c67dcec552c6d | lang: Python | size: 167 bytes | license: MIT ====

from . import db
class Movie(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50))
rating = db.Column(db.Integer)
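
# A minimal usage sketch (assumed; not part of the original file): with the
# Flask-SQLAlchemy `db` object bound to an app, the model can be created and
# queried as below. The title and rating values are illustrative.
#
#   db.create_all()
#   db.session.add(Movie(title="Arrival", rating=5))
#   db.session.commit()
#   top_rated = Movie.query.filter(Movie.rating >= 4).all()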

==== aiida/backends/sqlalchemy/migrations/versions/e15ef2630a1b_initial_schema.py | repo: joepvd/aiida_core | hexsha: 8633924452eda9f379515d6309ee6193f8a43999 | lang: Python | size: 19,097 bytes | license: BSD-2-Clause | stars: 1 ====

# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Initial schema
Revision ID: e15ef2630a1b
Revises:
Create Date: 2017-06-28 17:12:23.327195
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.session import Session
from aiida.backends.sqlalchemy.utils import install_tc
# revision identifiers, used by Alembic.
revision = 'e15ef2630a1b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('db_dbuser',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('email', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('password', sa.VARCHAR(length=128), autoincrement=False, nullable=True),
sa.Column('is_superuser', sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.Column('first_name', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('last_name', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('institution', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('is_staff', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('is_active', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('last_login', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('date_joined', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbuser_pkey'),
postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbuser_email', 'db_dbuser', ['email'], unique=True)
op.create_table('db_dbworkflow',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('nodeversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('lastsyncedversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('report', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('module', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('module_class', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('script_path', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('script_md5', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbworkflow_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflow_pkey'),
postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbworkflow_label', 'db_dbworkflow', ['label'])
op.create_table('db_dbworkflowstep',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('nextcall', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowstep_parent_id_fkey'),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbworkflowstep_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_pkey'),
sa.UniqueConstraint('parent_id', 'name', name=u'db_dbworkflowstep_parent_id_name_key'),
postgresql_ignore_search_path=False
)
op.create_table('db_dbcomputer',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('hostname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('enabled', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('transport_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('scheduler_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('transport_params', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcomputer_pkey'),
sa.UniqueConstraint('name', name=u'db_dbcomputer_name_key')
)
op.create_table('db_dbauthinfo',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('aiidauser_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbcomputer_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('auth_params', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('enabled', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['aiidauser_id'], [u'db_dbuser.id'], name=u'db_dbauthinfo_aiidauser_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['dbcomputer_id'], [u'db_dbcomputer.id'], name=u'db_dbauthinfo_dbcomputer_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbauthinfo_pkey'),
sa.UniqueConstraint('aiidauser_id', 'dbcomputer_id', name=u'db_dbauthinfo_aiidauser_id_dbcomputer_id_key')
)
op.create_table('db_dbgroup',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbgroup_user_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbgroup_pkey'),
sa.UniqueConstraint('name', 'type', name=u'db_dbgroup_name_type_key')
)
op.create_index('ix_db_dbgroup_name', 'db_dbgroup', ['name'])
op.create_index('ix_db_dbgroup_type', 'db_dbgroup', ['type'])
op.create_table('db_dbnode',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('nodeversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('public', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('attributes', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('extras', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('dbcomputer_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['dbcomputer_id'], [u'db_dbcomputer.id'], name=u'db_dbnode_dbcomputer_id_fkey', ondelete=u'RESTRICT', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbnode_user_id_fkey', ondelete=u'RESTRICT', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbnode_pkey'),postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbnode_label', 'db_dbnode', ['label'])
op.create_index('ix_db_dbnode_type', 'db_dbnode', ['type'])
op.create_table('db_dbgroup_dbnodes',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbgroup_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbgroup_id'], [u'db_dbgroup.id'], name=u'db_dbgroup_dbnodes_dbgroup_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbgroup_dbnodes_dbnode_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbgroup_dbnodes_pkey')
)
op.create_table('db_dblock',
sa.Column('key', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('creation', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('timeout', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('owner', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('key', name=u'db_dblock_pkey')
)
op.create_table('db_dbworkflowdata',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('data_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('value_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('json_value', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('aiida_obj_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['aiida_obj_id'], [u'db_dbnode.id'], name=u'db_dbworkflowdata_aiida_obj_id_fkey'),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowdata_parent_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowdata_pkey'),
sa.UniqueConstraint('parent_id', 'name', 'data_type', name=u'db_dbworkflowdata_parent_id_name_data_type_key')
)
op.create_table('db_dblink',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('input_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('output_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['input_id'], [u'db_dbnode.id'], name=u'db_dblink_input_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['output_id'], [u'db_dbnode.id'], name=u'db_dblink_output_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dblink_pkey'),
)
op.create_index('ix_db_dblink_label', 'db_dblink', ['label'])
op.create_table('db_dbworkflowstep_calculations',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbworkflowstep_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbworkflowstep_calculations_dbnode_id_fkey'),
sa.ForeignKeyConstraint(['dbworkflowstep_id'], [u'db_dbworkflowstep.id'], name=u'db_dbworkflowstep_calculations_dbworkflowstep_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_calculations_pkey'),
sa.UniqueConstraint('dbworkflowstep_id', 'dbnode_id', name=u'db_dbworkflowstep_calculations_dbworkflowstep_id_dbnode_id_key')
)
op.create_table('db_dbpath',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('child_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('depth', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('entry_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('direct_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('exit_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['child_id'], [u'db_dbnode.id'], name=u'db_dbpath_child_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbnode.id'], name=u'db_dbpath_parent_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbpath_pkey')
)
op.create_table('db_dbcalcstate',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbcalcstate_dbnode_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcalcstate_pkey'),
sa.UniqueConstraint('dbnode_id', 'state', name=u'db_dbcalcstate_dbnode_id_state_key')
)
op.create_index('ix_db_dbcalcstate_state', 'db_dbcalcstate', ['state'])
op.create_table('db_dbsetting',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('key', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('val', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('description', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbsetting_pkey'),
sa.UniqueConstraint('key', name=u'db_dbsetting_key_key')
)
op.create_index('ix_db_dbsetting_key', 'db_dbsetting', ['key'])
op.create_table('db_dbcomment',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('content', sa.TEXT(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbcomment_dbnode_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbcomment_user_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcomment_pkey')
)
op.create_table('db_dblog',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('loggername', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('levelname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('objname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('objpk', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('message', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dblog_pkey')
)
op.create_index('ix_db_dblog_levelname', 'db_dblog', ['levelname'])
op.create_index('ix_db_dblog_loggername', 'db_dblog', ['loggername'])
op.create_index('ix_db_dblog_objname', 'db_dblog', ['objname'])
op.create_index('ix_db_dblog_objpk', 'db_dblog', ['objpk'])
op.create_table('db_dbworkflowstep_sub_workflows',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbworkflowstep_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbworkflow_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbworkflow_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowstep_sub_workflows_dbworkflow_id_fkey'),
sa.ForeignKeyConstraint(['dbworkflowstep_id'], [u'db_dbworkflowstep.id'], name=u'db_dbworkflowstep_sub_workflows_dbworkflowstep_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_sub_workflows_pkey'),
sa.UniqueConstraint('dbworkflowstep_id', 'dbworkflow_id', name=u'db_dbworkflowstep_sub_workflo_dbworkflowstep_id_dbworkflow__key')
)
    # Obtain the session from the alembic connection
    # (keep in mind that alembic reuses the AiiDA SQLAlchemy session).
session = Session(bind=op.get_bind())
install_tc(session)
def downgrade():
op.drop_table('db_dbworkflowstep_calculations')
op.drop_table('db_dbworkflowstep_sub_workflows')
op.drop_table('db_dbworkflowdata')
op.drop_table('db_dbworkflowstep')
op.drop_table('db_dbworkflow')
op.drop_table('db_dbgroup_dbnodes')
op.drop_table('db_dbgroup')
op.drop_table('db_dblink')
op.drop_table('db_dbpath')
op.drop_table('db_dbcalcstate')
op.drop_table('db_dbcomment')
op.drop_table('db_dbnode')
op.drop_table('db_dbauthinfo')
op.drop_table('db_dbuser')
op.drop_table('db_dbcomputer')
op.drop_table('db_dblog')
op.drop_table('db_dbsetting')
op.drop_table('db_dblock')
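
# A minimal usage sketch (assumed; not part of the original migration): Alembic
# applies or reverts this revision through its standard CLI, driven by the
# project's alembic.ini:
#
#   alembic upgrade e15ef2630a1b   # create the initial schema
#   alembic downgrade base         # drop every table created above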

==== __init__.py | repo: Leonardo-H/Energy-Efficient-RL | hexsha: 86415dff82b7a45f70ba903723aa0dc9603b9375 | lang: Python | size: 374 bytes | license: Apache-2.0 ====

from baselines.EERL.graph.advantage_learning_graph import adv_build_act, adv_build_train
from baselines.EERL.graph.imitation_graph import imit_build_act, imit_build_train
from baselines.EERL.graph.svgd_imitation_graph import svgd_imit_build_act, svgd_imit_build_train
from baselines.EERL.graph.svgd_advantage_learning_graph import svgd_adv_build_act, svgd_adv_build_train

==== tests/test_longest_palindromic.py | repo: stachenov/PyLeetCode | hexsha: 864fb219f6ce94d4397b1987dc10b8871a6243e5 | lang: Python | size: 2,471 bytes | license: Unlicense ====

import pytest
from problems.longest_palindromic import Solution
@pytest.mark.parametrize("s,expected", [
("", ""),
("a", "a"),
("aa", "aa"),
("aaa", "aaa"),
("aba", "aba"),
("abaa", "aba"),
("abaac", "aba"),
("aaba", "aba"),
("caaba", "aba"),
("abba", "abba"),
("abcba", "abcba"),
("abcac", "cac"),
("abacabxc", "bacab"),
("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
])
def test(s, expected):
assert Solution().longestPalindrome(s) == expected
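
# A minimal sketch (assumed; not the repository's actual implementation) of a
# Solution class that satisfies the parametrized cases above, using the classic
# expand-around-center approach in O(n^2) time:
#
#   class Solution:
#       def longestPalindrome(self, s: str) -> str:
#           best = ""
#           for i in range(len(s)):
#               # try an odd-length center (i, i) and an even-length center (i, i + 1)
#               for lo, hi in ((i, i), (i, i + 1)):
#                   while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
#                       lo, hi = lo - 1, hi + 1
#                   if hi - lo - 1 > len(best):
#                       best = s[lo + 1:hi]
#           return best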

==== tests/test_alpaca_data_loader.py | repo: andywitt1/LiuAlgoTrader | hexsha: 86856bb02d92ce188ce5e494d1ee917edcbc9a4e | lang: Python | size: 9,964 bytes | license: MIT ====

from datetime import date, datetime
import pandas as pd
import pytest
from pytz import timezone
from liualgotrader.common import config
from liualgotrader.common.data_loader import DataLoader # type: ignore
from liualgotrader.common.types import DataConnectorType, TimeScale
nyc = timezone("America/New_York")
@pytest.mark.devtest
def test_create_data_loader_default() -> bool:
return bool(DataLoader(connector=DataConnectorType.alpaca))
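
# Note (assumed usage): the devtest-marked cases in this module are typically
# selected with pytest's marker filter, e.g.
#
#   pytest -m devtest tests/test_alpaca_data_loader.py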
@pytest.mark.devtest
def test_apple_stock_latest_price() -> bool:
print("test_apple_stock_latest_price")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price = dl["AAPL"].close[-1]
last_price_time = dl["AAPL"].close.index[-1]
print(f"apple {last_price} @ {last_price_time}")
return True
@pytest.mark.devtest
def test_apple_stock_current_price() -> bool:
print("test_apple_stock_current_price")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price = dl["AAPL"].close[-1]
last_price_time = dl["AAPL"].close.index[-1]
before_price = dl["AAPL"].close[-5]
before_price_time = dl["AAPL"].close.index[-5]
print(
f"apple {last_price} @ {last_price_time}, before was {before_price}@{before_price_time}"
)
return True
@pytest.mark.devtest
def test_apple_stock_current_price_range_int_minute() -> bool:
print("test_apple_stock_current_price_range_int_minute")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].close[-5:-1] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_current_price_range_int_day() -> bool:
print("test_apple_stock_current_price_range_int_day")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].close[-6:-1] # type:ignore
print(last_price_range)
return True
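
# Note (added commentary): the `no_` prefix on the function below keeps it out
# of pytest's default `test_*` collection, effectively disabling the case
# without deleting the code.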
@pytest.mark.devtest
def no_test_apple_stock_daily_price() -> bool:
print("test_apple_stock_daily_price")
dl = DataLoader(scale=TimeScale.day, connector=DataConnectorType.alpaca)
last_price = dl["AAPL"].close[-1]
last_price_time = dl["AAPL"].close.index[-1]
print(last_price, last_price_time)
before_price = dl["AAPL"].close[-5]
print(f"before_price {before_price}, {dl['AAPL']}")
print(f"apple {last_price} @ {last_price_time}, before was {before_price}")
return True
@pytest.mark.devtest
def test_negative_current_price() -> bool:
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
try:
dl["DFGDFGDFG"].close[-1]
except ValueError:
return True
return False
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_day() -> bool:
print("test_apple_stock_close_price_range_int_day")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].close[
"2021-01-01":"2021-01-05" # type:ignore
] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_minute() -> bool:
print("test_apple_stock_close_price_range_str_minute")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].close[
"2021-01-05 09:45:00":"2021-01-05 09:50:00" # type:ignore
]
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_minute_int() -> bool:
print("test_apple_stock_close_price_range_str_minute")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].close[
"2021-12-15 09:45:00":-1 # type:ignore
] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_int_minute() -> bool:
print("test_apple_stock_close_price_range_str_minute")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"][-5:-1] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_int_day() -> bool:
print("test_apple_stock_price_range_int_day")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"][-5:-1] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_day() -> bool:
print("test_apple_stock_price_range_date_day")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"]["2020-10-05":"2020-10-08"] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_day() -> bool:
print("test_apple_stock_price_range_date_int_day")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"]["2020-10-05":-1] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_min() -> bool:
print("test_apple_stock_price_range_date_int_min")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"]["2020-10-05":-1] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_min_open() -> bool:
print("test_apple_stock_price_range_date_int_min_open")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"]["2020-10-05":] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_int_min_open() -> bool:
print("test_apple_stock_price_close_range_date_int_min_open")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].open["2020-10-05":] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min_open() -> bool:
print("test_apple_stock_price_range_date_min_open")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
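    # NOTE: a fully open slice (dl["AAPL"][:]) may either return the whole
    # frame or raise ValueError; this test accepts both outcomes, as does
    # the .open variant below.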
try:
last_price_range = dl["AAPL"][:] # type:ignore
print(last_price_range)
except ValueError:
return True
return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_min_open() -> bool:
print("test_apple_stock_price_open_range_date_min_open")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
try:
last_price_range = dl["AAPL"].open[:] # type:ignore
print(last_price_range)
except ValueError:
return True
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min() -> bool:
print("test_apple_stock_price_range_date_min")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
d1 = date(year=2021, month=2, day=1)
d2 = date(year=2021, month=2, day=2)
last_price_range = dl["AAPL"][d1:d2].between_time( # type:ignore
"9:30", "16:00"
) # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min_mixed() -> bool:
print("test_apple_stock_price_range_date_min_mixed")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
d1 = date(year=2021, month=2, day=1)
last_price_range = dl["AAPL"][d1:"2021-02-02"].between_time( # type:ignore
"9:30", "16:00"
) # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_day_mixed() -> bool:
print("test_apple_stock_price_range_date_day_mixed")
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
d1 = date(year=2021, month=2, day=1)
last_price_range = dl["AAPL"][d1:"2021-02-02"] # type:ignore
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_min_mixed() -> bool:
print("test_apple_stock_price_range_date_min_mixed")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
d1 = datetime(year=2021, month=2, day=1, hour=3, minute=0)
last_price_range = (
dl["AAPL"]
.open[d1:"2021-02-01 21:00:00"] # type:ignore
.between_time("9:30", "16:00") # type:ignore
)
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_open_str() -> bool:
print("test_apple_stock_price_open_str")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
last_price_range = dl["AAPL"].open["2021-02-02 09:45:00"]
print(last_price_range)
return True
@pytest.mark.devtest
def test_apple_stock_price_open_date() -> bool:
print("test_apple_stock_price_open_date")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
d1 = nyc.localize(datetime(year=2021, month=2, day=1, hour=9, minute=30))
last_price_range = dl["AAPL"].open[d1]
print(last_price_range)
return True
@pytest.mark.devtest
def test_get_symbols_alpaca() -> bool:
print("test_get_symbols_alpaca")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
tickers = dl.data_api.get_symbols()
print(len(tickers))
return True
@pytest.mark.devtest
def test_apple_update() -> bool:
print("test_apple_stock_price_open_str")
dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    _ = dl["AAPL"][-1]  # initial read so the symbol's data is loaded
print("after this")
dl["AAPL"].loc["2021-02-02 09:46:00"] = [
100.0,
100.0,
100.0,
100.0,
100.0,
100.0,
100.0,
100.0,
]
print(dl["AAPL"].loc["2021-02-02 09:46:00"])
return True
| 30.753086
| 96
| 0.724508
| 1,402
| 9,964
| 4.835235
| 0.077746
| 0.10621
| 0.097064
| 0.084083
| 0.89497
| 0.880661
| 0.848503
| 0.807641
| 0.767222
| 0.720018
| 0
| 0.038095
| 0.156965
| 9,964
| 323
| 97
| 30.848297
| 0.768929
| 0.028904
| 0
| 0.588477
| 0
| 0
| 0.169102
| 0.105067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.028807
| 0.004115
| 0.263374
| 0.218107
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86ba11df826b1a85b469e85b635d2a5d86cc38c0
| 142
|
py
|
Python
|
rsockets2/transport/__init__.py
|
freelancer1845/rsockets2-try
|
25b8b38e00925c3feb6c6e790624a35bc8689619
|
[
"Apache-2.0"
] | 3
|
2020-05-08T09:45:44.000Z
|
2020-11-13T11:39:06.000Z
|
rsockets2/transport/__init__.py
|
freelancer1845/rsockets2-try
|
25b8b38e00925c3feb6c6e790624a35bc8689619
|
[
"Apache-2.0"
] | 1
|
2022-01-27T08:07:22.000Z
|
2022-01-27T08:07:22.000Z
|
rsockets2/transport/__init__.py
|
freelancer1845/rsockets2-try
|
25b8b38e00925c3feb6c6e790624a35bc8689619
|
[
"Apache-2.0"
] | 1
|
2020-05-08T09:47:14.000Z
|
2020-05-08T09:47:14.000Z
|
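# Re-export the concrete transport implementations at package level.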
from .abstract_transport import AbstractTransport
from .tcp_transport import TcpTransport
from .websocket_transport import WebsocketTransport
| 35.5
| 51
| 0.894366
| 15
| 142
| 8.266667
| 0.6
| 0.362903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 142
| 3
| 52
| 47.333333
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4961ae1071073b986470319fdd2d6becb467c03
| 37,894
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/60.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/60.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/60.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 3241
passenger_arriving = (
(2, 4, 9, 5, 2, 0, 7, 7, 4, 5, 1, 0), # 0
(1, 9, 7, 4, 1, 0, 6, 6, 9, 6, 1, 0), # 1
(5, 6, 8, 4, 3, 0, 8, 11, 5, 2, 1, 0), # 2
(4, 5, 5, 4, 2, 0, 8, 15, 0, 4, 4, 0), # 3
(5, 11, 13, 4, 3, 0, 11, 6, 3, 1, 1, 0), # 4
(9, 6, 8, 3, 0, 0, 7, 6, 9, 7, 4, 0), # 5
(8, 12, 3, 4, 1, 0, 4, 12, 5, 4, 0, 0), # 6
(4, 13, 11, 2, 3, 0, 9, 9, 6, 5, 1, 0), # 7
(3, 8, 7, 7, 3, 0, 5, 9, 7, 5, 1, 0), # 8
(2, 5, 5, 1, 3, 0, 3, 3, 5, 6, 5, 0), # 9
(5, 8, 9, 3, 1, 0, 8, 12, 4, 3, 4, 0), # 10
(3, 12, 6, 3, 2, 0, 6, 6, 8, 6, 1, 0), # 11
(1, 10, 8, 7, 1, 0, 10, 7, 6, 5, 1, 0), # 12
(4, 9, 8, 3, 1, 0, 7, 11, 12, 3, 3, 0), # 13
(2, 10, 6, 3, 2, 0, 9, 7, 4, 7, 1, 0), # 14
(2, 6, 9, 2, 4, 0, 7, 12, 7, 8, 2, 0), # 15
(3, 14, 8, 5, 5, 0, 8, 10, 5, 5, 1, 0), # 16
(2, 8, 6, 5, 2, 0, 6, 5, 3, 7, 0, 0), # 17
(3, 11, 8, 2, 0, 0, 4, 3, 4, 7, 0, 0), # 18
(2, 7, 4, 2, 4, 0, 7, 4, 3, 6, 2, 0), # 19
(7, 16, 5, 3, 3, 0, 8, 6, 6, 4, 2, 0), # 20
(1, 6, 11, 5, 0, 0, 4, 11, 6, 9, 0, 0), # 21
(5, 8, 7, 5, 2, 0, 10, 11, 4, 3, 2, 0), # 22
(2, 8, 14, 3, 3, 0, 3, 8, 4, 4, 3, 0), # 23
(3, 7, 5, 5, 5, 0, 5, 8, 4, 5, 3, 0), # 24
(7, 5, 5, 4, 3, 0, 2, 5, 6, 6, 1, 0), # 25
(7, 12, 3, 6, 3, 0, 4, 10, 9, 3, 4, 0), # 26
(1, 10, 8, 2, 1, 0, 5, 13, 5, 5, 0, 0), # 27
(6, 4, 9, 6, 3, 0, 6, 8, 7, 7, 3, 0), # 28
(6, 9, 13, 7, 1, 0, 3, 10, 7, 3, 4, 0), # 29
(4, 5, 5, 2, 2, 0, 9, 14, 6, 7, 1, 0), # 30
(2, 7, 3, 3, 1, 0, 7, 7, 4, 4, 2, 0), # 31
(4, 5, 9, 0, 2, 0, 5, 15, 3, 2, 1, 0), # 32
(4, 10, 9, 2, 3, 0, 11, 8, 4, 4, 1, 0), # 33
(6, 11, 6, 3, 1, 0, 8, 4, 10, 6, 2, 0), # 34
(6, 4, 3, 4, 3, 0, 10, 9, 6, 6, 0, 0), # 35
(4, 6, 15, 4, 2, 0, 13, 8, 3, 6, 2, 0), # 36
(6, 9, 5, 5, 6, 0, 10, 14, 2, 7, 3, 0), # 37
(2, 7, 11, 7, 1, 0, 7, 7, 5, 3, 1, 0), # 38
(4, 11, 7, 4, 2, 0, 6, 13, 3, 2, 3, 0), # 39
(10, 12, 10, 3, 3, 0, 4, 10, 6, 7, 1, 0), # 40
(6, 6, 4, 1, 2, 0, 7, 9, 5, 4, 0, 0), # 41
(6, 7, 10, 5, 3, 0, 0, 15, 9, 7, 0, 0), # 42
(8, 11, 8, 2, 3, 0, 9, 12, 7, 1, 3, 0), # 43
(5, 12, 13, 3, 2, 0, 8, 12, 3, 4, 4, 0), # 44
(5, 8, 4, 6, 0, 0, 8, 9, 5, 3, 5, 0), # 45
(11, 18, 7, 2, 0, 0, 5, 13, 6, 3, 3, 0), # 46
(5, 9, 2, 5, 0, 0, 4, 8, 5, 3, 1, 0), # 47
(5, 9, 8, 7, 1, 0, 11, 10, 4, 4, 0, 0), # 48
(0, 10, 8, 4, 1, 0, 6, 7, 7, 6, 2, 0), # 49
(8, 8, 7, 4, 3, 0, 4, 11, 4, 8, 1, 0), # 50
(9, 12, 10, 3, 2, 0, 7, 9, 6, 7, 3, 0), # 51
(3, 8, 7, 4, 3, 0, 3, 11, 5, 4, 1, 0), # 52
(6, 8, 6, 3, 4, 0, 9, 10, 6, 5, 3, 0), # 53
(2, 17, 11, 4, 2, 0, 5, 14, 6, 12, 2, 0), # 54
(3, 8, 5, 3, 2, 0, 12, 9, 5, 4, 1, 0), # 55
(1, 6, 9, 4, 6, 0, 11, 10, 6, 8, 2, 0), # 56
(5, 6, 3, 5, 1, 0, 8, 9, 3, 6, 1, 0), # 57
(0, 11, 6, 5, 0, 0, 4, 13, 5, 4, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(3.7095121817383676, 9.515044981060607, 11.19193043059126, 8.87078804347826, 10.000240384615385, 6.659510869565219), # 0
(3.7443308140669203, 9.620858238197952, 11.252381752534994, 8.920190141908213, 10.075193108974359, 6.657240994867151), # 1
(3.7787518681104277, 9.725101964085297, 11.31139817195087, 8.968504830917876, 10.148564102564103, 6.654901690821256), # 2
(3.8127461259877085, 9.827663671875001, 11.368936576156813, 9.01569089673913, 10.22028605769231, 6.652493274456523), # 3
(3.8462843698175795, 9.928430874719417, 11.424953852470724, 9.061707125603865, 10.290291666666668, 6.6500160628019325), # 4
(3.879337381718857, 10.027291085770905, 11.479406888210512, 9.106512303743962, 10.358513621794872, 6.647470372886473), # 5
(3.9118759438103607, 10.12413181818182, 11.53225257069409, 9.150065217391306, 10.424884615384617, 6.644856521739131), # 6
(3.943870838210907, 10.218840585104518, 11.58344778723936, 9.19232465277778, 10.489337339743592, 6.64217482638889), # 7
(3.975292847039314, 10.311304899691358, 11.632949425164242, 9.233249396135266, 10.551804487179488, 6.639425603864735), # 8
(4.006112752414399, 10.401412275094698, 11.680714371786634, 9.272798233695653, 10.61221875, 6.636609171195653), # 9
(4.03630133645498, 10.489050224466892, 11.72669951442445, 9.310929951690824, 10.670512820512823, 6.633725845410628), # 10
(4.065829381279876, 10.5741062609603, 11.7708617403956, 9.347603336352659, 10.726619391025642, 6.630775943538648), # 11
(4.094667669007903, 10.656467897727273, 11.813157937017996, 9.382777173913043, 10.780471153846154, 6.627759782608695), # 12
(4.122786981757876, 10.736022647920176, 11.85354499160954, 9.416410250603866, 10.832000801282053, 6.624677679649759), # 13
(4.15015810164862, 10.81265802469136, 11.891979791488144, 9.448461352657004, 10.881141025641025, 6.621529951690821), # 14
(4.1767518107989465, 10.886261541193182, 11.928419223971721, 9.478889266304348, 10.92782451923077, 6.618316915760871), # 15
(4.202538891327675, 10.956720710578002, 11.96282017637818, 9.507652777777778, 10.971983974358976, 6.61503888888889), # 16
(4.227490125353625, 11.023923045998176, 11.995139536025421, 9.53471067330918, 11.013552083333336, 6.611696188103866), # 17
(4.25157629499561, 11.087756060606061, 12.025334190231364, 9.560021739130436, 11.052461538461543, 6.608289130434783), # 18
(4.274768182372451, 11.148107267554012, 12.053361026313912, 9.58354476147343, 11.088645032051284, 6.604818032910629), # 19
(4.297036569602966, 11.204864179994388, 12.079176931590974, 9.60523852657005, 11.122035256410259, 6.601283212560387), # 20
(4.318352238805971, 11.257914311079544, 12.102738793380466, 9.625061820652174, 11.152564903846153, 6.597684986413044), # 21
(4.338685972100283, 11.307145173961842, 12.124003499000287, 9.642973429951692, 11.180166666666667, 6.5940236714975855), # 22
(4.358008551604722, 11.352444281793632, 12.142927935768354, 9.658932140700484, 11.204773237179488, 6.590299584842997), # 23
(4.3762907594381035, 11.393699147727272, 12.159468991002571, 9.672896739130437, 11.226317307692307, 6.586513043478261), # 24
(4.393503377719247, 11.430797284915124, 12.173583552020853, 9.684826011473431, 11.244731570512819, 6.582664364432368), # 25
(4.409617188566969, 11.46362620650954, 12.185228506141103, 9.694678743961353, 11.259948717948719, 6.5787538647343), # 26
(4.424602974100088, 11.492073425662877, 12.194360740681233, 9.702413722826089, 11.271901442307694, 6.574781861413045), # 27
(4.438431516437421, 11.516026455527497, 12.200937142959157, 9.707989734299519, 11.280522435897437, 6.570748671497586), # 28
(4.4510735976977855, 11.535372809255753, 12.204914600292774, 9.711365564613528, 11.285744391025641, 6.566654612016909), # 29
(4.4625, 11.55, 12.20625, 9.7125, 11.287500000000001, 6.562500000000001), # 30
(4.47319183983376, 11.56215031960227, 12.205248928140096, 9.712295118464054, 11.286861125886526, 6.556726763701484), # 31
(4.4836528452685425, 11.574140056818184, 12.202274033816424, 9.711684477124184, 11.28495815602837, 6.547834661835751), # 32
(4.493887715792838, 11.585967720170455, 12.197367798913046, 9.710674080882354, 11.281811569148937, 6.535910757121439), # 33
(4.503901150895141, 11.597631818181819, 12.19057270531401, 9.709269934640524, 11.277441843971632, 6.521042112277196), # 34
(4.513697850063939, 11.609130859374998, 12.181931234903383, 9.707478043300654, 11.27186945921986, 6.503315790021656), # 35
(4.523282512787724, 11.62046335227273, 12.171485869565219, 9.705304411764708, 11.265114893617023, 6.482818853073463), # 36
(4.532659838554988, 11.631627805397729, 12.159279091183576, 9.70275504493464, 11.257198625886524, 6.4596383641512585), # 37
(4.5418345268542195, 11.642622727272729, 12.145353381642513, 9.699835947712419, 11.248141134751775, 6.433861385973679), # 38
(4.5508112771739135, 11.653446626420456, 12.129751222826087, 9.696553125000001, 11.23796289893617, 6.40557498125937), # 39
(4.559594789002558, 11.664098011363638, 12.11251509661836, 9.692912581699348, 11.22668439716312, 6.37486621272697), # 40
(4.568189761828645, 11.674575390625, 12.093687484903382, 9.68892032271242, 11.214326108156028, 6.34182214309512), # 41
(4.576600895140665, 11.684877272727276, 12.07331086956522, 9.684582352941177, 11.2009085106383, 6.3065298350824595), # 42
(4.584832888427111, 11.69500216619318, 12.051427732487923, 9.679904677287583, 11.186452083333334, 6.26907635140763), # 43
(4.592890441176471, 11.704948579545455, 12.028080555555556, 9.674893300653595, 11.17097730496454, 6.229548754789272), # 44
(4.600778252877237, 11.714715021306818, 12.003311820652177, 9.669554227941177, 11.15450465425532, 6.188034107946028), # 45
(4.6085010230179035, 11.724300000000003, 11.97716400966184, 9.663893464052288, 11.137054609929079, 6.144619473596536), # 46
(4.616063451086957, 11.733702024147728, 11.9496796044686, 9.65791701388889, 11.118647650709221, 6.099391914459438), # 47
(4.623470236572891, 11.742919602272728, 11.920901086956523, 9.651630882352942, 11.099304255319149, 6.052438493253375), # 48
(4.630726078964194, 11.751951242897727, 11.890870939009663, 9.645041074346407, 11.079044902482272, 6.003846272696985), # 49
(4.6378356777493615, 11.760795454545454, 11.85963164251208, 9.638153594771243, 11.057890070921987, 5.953702315508913), # 50
(4.6448037324168805, 11.769450745738636, 11.827225679347826, 9.630974448529413, 11.035860239361703, 5.902093684407797), # 51
(4.651634942455243, 11.777915625, 11.793695531400965, 9.623509640522876, 11.012975886524824, 5.849107442112278), # 52
(4.658334007352941, 11.786188600852274, 11.759083680555555, 9.615765175653596, 10.989257491134753, 5.794830651340996), # 53
(4.6649056265984665, 11.79426818181818, 11.723432608695653, 9.60774705882353, 10.964725531914894, 5.739350374812594), # 54
(4.671354499680307, 11.802152876420456, 11.686784797705313, 9.599461294934642, 10.939400487588653, 5.682753675245711), # 55
(4.677685326086957, 11.809841193181818, 11.649182729468599, 9.59091388888889, 10.913302836879433, 5.625127615358988), # 56
(4.683902805306906, 11.817331640625003, 11.610668885869565, 9.582110845588236, 10.886453058510638, 5.566559257871065), # 57
(4.690011636828645, 11.824622727272727, 11.57128574879227, 9.573058169934642, 10.858871631205675, 5.507135665500583), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(2, 4, 9, 5, 2, 0, 7, 7, 4, 5, 1, 0), # 0
(3, 13, 16, 9, 3, 0, 13, 13, 13, 11, 2, 0), # 1
(8, 19, 24, 13, 6, 0, 21, 24, 18, 13, 3, 0), # 2
(12, 24, 29, 17, 8, 0, 29, 39, 18, 17, 7, 0), # 3
(17, 35, 42, 21, 11, 0, 40, 45, 21, 18, 8, 0), # 4
(26, 41, 50, 24, 11, 0, 47, 51, 30, 25, 12, 0), # 5
(34, 53, 53, 28, 12, 0, 51, 63, 35, 29, 12, 0), # 6
(38, 66, 64, 30, 15, 0, 60, 72, 41, 34, 13, 0), # 7
(41, 74, 71, 37, 18, 0, 65, 81, 48, 39, 14, 0), # 8
(43, 79, 76, 38, 21, 0, 68, 84, 53, 45, 19, 0), # 9
(48, 87, 85, 41, 22, 0, 76, 96, 57, 48, 23, 0), # 10
(51, 99, 91, 44, 24, 0, 82, 102, 65, 54, 24, 0), # 11
(52, 109, 99, 51, 25, 0, 92, 109, 71, 59, 25, 0), # 12
(56, 118, 107, 54, 26, 0, 99, 120, 83, 62, 28, 0), # 13
(58, 128, 113, 57, 28, 0, 108, 127, 87, 69, 29, 0), # 14
(60, 134, 122, 59, 32, 0, 115, 139, 94, 77, 31, 0), # 15
(63, 148, 130, 64, 37, 0, 123, 149, 99, 82, 32, 0), # 16
(65, 156, 136, 69, 39, 0, 129, 154, 102, 89, 32, 0), # 17
(68, 167, 144, 71, 39, 0, 133, 157, 106, 96, 32, 0), # 18
(70, 174, 148, 73, 43, 0, 140, 161, 109, 102, 34, 0), # 19
(77, 190, 153, 76, 46, 0, 148, 167, 115, 106, 36, 0), # 20
(78, 196, 164, 81, 46, 0, 152, 178, 121, 115, 36, 0), # 21
(83, 204, 171, 86, 48, 0, 162, 189, 125, 118, 38, 0), # 22
(85, 212, 185, 89, 51, 0, 165, 197, 129, 122, 41, 0), # 23
(88, 219, 190, 94, 56, 0, 170, 205, 133, 127, 44, 0), # 24
(95, 224, 195, 98, 59, 0, 172, 210, 139, 133, 45, 0), # 25
(102, 236, 198, 104, 62, 0, 176, 220, 148, 136, 49, 0), # 26
(103, 246, 206, 106, 63, 0, 181, 233, 153, 141, 49, 0), # 27
(109, 250, 215, 112, 66, 0, 187, 241, 160, 148, 52, 0), # 28
(115, 259, 228, 119, 67, 0, 190, 251, 167, 151, 56, 0), # 29
(119, 264, 233, 121, 69, 0, 199, 265, 173, 158, 57, 0), # 30
(121, 271, 236, 124, 70, 0, 206, 272, 177, 162, 59, 0), # 31
(125, 276, 245, 124, 72, 0, 211, 287, 180, 164, 60, 0), # 32
(129, 286, 254, 126, 75, 0, 222, 295, 184, 168, 61, 0), # 33
(135, 297, 260, 129, 76, 0, 230, 299, 194, 174, 63, 0), # 34
(141, 301, 263, 133, 79, 0, 240, 308, 200, 180, 63, 0), # 35
(145, 307, 278, 137, 81, 0, 253, 316, 203, 186, 65, 0), # 36
(151, 316, 283, 142, 87, 0, 263, 330, 205, 193, 68, 0), # 37
(153, 323, 294, 149, 88, 0, 270, 337, 210, 196, 69, 0), # 38
(157, 334, 301, 153, 90, 0, 276, 350, 213, 198, 72, 0), # 39
(167, 346, 311, 156, 93, 0, 280, 360, 219, 205, 73, 0), # 40
(173, 352, 315, 157, 95, 0, 287, 369, 224, 209, 73, 0), # 41
(179, 359, 325, 162, 98, 0, 287, 384, 233, 216, 73, 0), # 42
(187, 370, 333, 164, 101, 0, 296, 396, 240, 217, 76, 0), # 43
(192, 382, 346, 167, 103, 0, 304, 408, 243, 221, 80, 0), # 44
(197, 390, 350, 173, 103, 0, 312, 417, 248, 224, 85, 0), # 45
(208, 408, 357, 175, 103, 0, 317, 430, 254, 227, 88, 0), # 46
(213, 417, 359, 180, 103, 0, 321, 438, 259, 230, 89, 0), # 47
(218, 426, 367, 187, 104, 0, 332, 448, 263, 234, 89, 0), # 48
(218, 436, 375, 191, 105, 0, 338, 455, 270, 240, 91, 0), # 49
(226, 444, 382, 195, 108, 0, 342, 466, 274, 248, 92, 0), # 50
(235, 456, 392, 198, 110, 0, 349, 475, 280, 255, 95, 0), # 51
(238, 464, 399, 202, 113, 0, 352, 486, 285, 259, 96, 0), # 52
(244, 472, 405, 205, 117, 0, 361, 496, 291, 264, 99, 0), # 53
(246, 489, 416, 209, 119, 0, 366, 510, 297, 276, 101, 0), # 54
(249, 497, 421, 212, 121, 0, 378, 519, 302, 280, 102, 0), # 55
(250, 503, 430, 216, 127, 0, 389, 529, 308, 288, 104, 0), # 56
(255, 509, 433, 221, 128, 0, 397, 538, 311, 294, 105, 0), # 57
(255, 520, 439, 226, 128, 0, 401, 551, 316, 298, 107, 0), # 58
(255, 520, 439, 226, 128, 0, 401, 551, 316, 298, 107, 0), # 59
)
passenger_arriving_rate = (
(3.7095121817383676, 7.612035984848484, 6.715158258354756, 3.5483152173913037, 2.000048076923077, 0.0, 6.659510869565219, 8.000192307692307, 5.322472826086956, 4.476772172236504, 1.903008996212121, 0.0), # 0
(3.7443308140669203, 7.696686590558361, 6.751429051520996, 3.5680760567632848, 2.0150386217948717, 0.0, 6.657240994867151, 8.060154487179487, 5.352114085144928, 4.500952701013997, 1.9241716476395903, 0.0), # 1
(3.7787518681104277, 7.780081571268237, 6.786838903170522, 3.58740193236715, 2.0297128205128203, 0.0, 6.654901690821256, 8.118851282051281, 5.381102898550726, 4.524559268780347, 1.9450203928170593, 0.0), # 2
(3.8127461259877085, 7.8621309375, 6.821361945694087, 3.6062763586956517, 2.044057211538462, 0.0, 6.652493274456523, 8.176228846153847, 5.409414538043478, 4.547574630462725, 1.965532734375, 0.0), # 3
(3.8462843698175795, 7.942744699775533, 6.854972311482434, 3.624682850241546, 2.0580583333333333, 0.0, 6.6500160628019325, 8.232233333333333, 5.437024275362319, 4.569981540988289, 1.9856861749438832, 0.0), # 4
(3.879337381718857, 8.021832868616723, 6.887644132926307, 3.6426049214975844, 2.0717027243589743, 0.0, 6.647470372886473, 8.286810897435897, 5.463907382246377, 4.591762755284204, 2.005458217154181, 0.0), # 5
(3.9118759438103607, 8.099305454545455, 6.919351542416455, 3.660026086956522, 2.084976923076923, 0.0, 6.644856521739131, 8.339907692307692, 5.490039130434783, 4.612901028277636, 2.0248263636363637, 0.0), # 6
(3.943870838210907, 8.175072468083613, 6.950068672343615, 3.6769298611111116, 2.0978674679487184, 0.0, 6.64217482638889, 8.391469871794873, 5.515394791666668, 4.633379114895743, 2.043768117020903, 0.0), # 7
(3.975292847039314, 8.249043919753085, 6.979769655098544, 3.693299758454106, 2.1103608974358976, 0.0, 6.639425603864735, 8.44144358974359, 5.5399496376811594, 4.653179770065696, 2.062260979938271, 0.0), # 8
(4.006112752414399, 8.321129820075758, 7.00842862307198, 3.709119293478261, 2.12244375, 0.0, 6.636609171195653, 8.489775, 5.563678940217391, 4.672285748714653, 2.0802824550189394, 0.0), # 9
(4.03630133645498, 8.391240179573513, 7.03601970865467, 3.724371980676329, 2.134102564102564, 0.0, 6.633725845410628, 8.536410256410257, 5.586557971014494, 4.690679805769779, 2.0978100448933783, 0.0), # 10
(4.065829381279876, 8.459285008768239, 7.06251704423736, 3.739041334541063, 2.145323878205128, 0.0, 6.630775943538648, 8.581295512820512, 5.608562001811595, 4.70834469615824, 2.1148212521920597, 0.0), # 11
(4.094667669007903, 8.525174318181818, 7.087894762210797, 3.7531108695652167, 2.156094230769231, 0.0, 6.627759782608695, 8.624376923076923, 5.6296663043478254, 4.725263174807198, 2.1312935795454546, 0.0), # 12
(4.122786981757876, 8.58881811833614, 7.112126994965724, 3.766564100241546, 2.1664001602564102, 0.0, 6.624677679649759, 8.665600641025641, 5.649846150362319, 4.741417996643816, 2.147204529584035, 0.0), # 13
(4.15015810164862, 8.650126419753088, 7.135187874892886, 3.779384541062801, 2.1762282051282047, 0.0, 6.621529951690821, 8.704912820512819, 5.669076811594202, 4.756791916595257, 2.162531604938272, 0.0), # 14
(4.1767518107989465, 8.709009232954545, 7.157051534383032, 3.7915557065217387, 2.1855649038461538, 0.0, 6.618316915760871, 8.742259615384615, 5.6873335597826085, 4.771367689588688, 2.177252308238636, 0.0), # 15
(4.202538891327675, 8.7653765684624, 7.177692105826908, 3.803061111111111, 2.194396794871795, 0.0, 6.61503888888889, 8.77758717948718, 5.7045916666666665, 4.785128070551272, 2.1913441421156, 0.0), # 16
(4.227490125353625, 8.81913843679854, 7.197083721615253, 3.8138842693236716, 2.202710416666667, 0.0, 6.611696188103866, 8.810841666666668, 5.720826403985508, 4.798055814410168, 2.204784609199635, 0.0), # 17
(4.25157629499561, 8.870204848484848, 7.215200514138818, 3.824008695652174, 2.2104923076923084, 0.0, 6.608289130434783, 8.841969230769234, 5.736013043478262, 4.810133676092545, 2.217551212121212, 0.0), # 18
(4.274768182372451, 8.918485814043208, 7.232016615788346, 3.8334179045893717, 2.2177290064102566, 0.0, 6.604818032910629, 8.870916025641026, 5.750126856884058, 4.8213444105255645, 2.229621453510802, 0.0), # 19
(4.297036569602966, 8.96389134399551, 7.247506158954584, 3.8420954106280196, 2.2244070512820517, 0.0, 6.601283212560387, 8.897628205128207, 5.76314311594203, 4.831670772636389, 2.2409728359988774, 0.0), # 20
(4.318352238805971, 9.006331448863634, 7.261643276028279, 3.8500247282608693, 2.2305129807692303, 0.0, 6.597684986413044, 8.922051923076921, 5.775037092391305, 4.841095517352186, 2.2515828622159084, 0.0), # 21
(4.338685972100283, 9.045716139169473, 7.274402099400172, 3.8571893719806765, 2.2360333333333333, 0.0, 6.5940236714975855, 8.944133333333333, 5.785784057971015, 4.849601399600115, 2.2614290347923682, 0.0), # 22
(4.358008551604722, 9.081955425434906, 7.285756761461012, 3.8635728562801934, 2.2409546474358972, 0.0, 6.590299584842997, 8.963818589743589, 5.79535928442029, 4.857171174307341, 2.2704888563587264, 0.0), # 23
(4.3762907594381035, 9.114959318181818, 7.295681394601543, 3.869158695652174, 2.2452634615384612, 0.0, 6.586513043478261, 8.981053846153845, 5.803738043478262, 4.863787596401028, 2.2787398295454544, 0.0), # 24
(4.393503377719247, 9.1446378279321, 7.304150131212511, 3.8739304045893723, 2.2489463141025636, 0.0, 6.582664364432368, 8.995785256410255, 5.810895606884059, 4.869433420808341, 2.286159456983025, 0.0), # 25
(4.409617188566969, 9.17090096520763, 7.311137103684661, 3.8778714975845405, 2.2519897435897436, 0.0, 6.5787538647343, 9.007958974358974, 5.816807246376811, 4.874091402456441, 2.2927252413019077, 0.0), # 26
(4.424602974100088, 9.193658740530301, 7.31661644440874, 3.880965489130435, 2.2543802884615385, 0.0, 6.574781861413045, 9.017521153846154, 5.821448233695653, 4.877744296272493, 2.2984146851325753, 0.0), # 27
(4.438431516437421, 9.212821164421996, 7.320562285775494, 3.8831958937198072, 2.256104487179487, 0.0, 6.570748671497586, 9.024417948717948, 5.824793840579711, 4.8803748571836625, 2.303205291105499, 0.0), # 28
(4.4510735976977855, 9.228298247404602, 7.322948760175664, 3.884546225845411, 2.257148878205128, 0.0, 6.566654612016909, 9.028595512820512, 5.826819338768117, 4.881965840117109, 2.3070745618511506, 0.0), # 29
(4.4625, 9.24, 7.32375, 3.885, 2.2575000000000003, 0.0, 6.562500000000001, 9.030000000000001, 5.8275, 4.8825, 2.31, 0.0), # 30
(4.47319183983376, 9.249720255681815, 7.323149356884057, 3.884918047385621, 2.257372225177305, 0.0, 6.556726763701484, 9.02948890070922, 5.827377071078432, 4.882099571256038, 2.312430063920454, 0.0), # 31
(4.4836528452685425, 9.259312045454546, 7.3213644202898545, 3.884673790849673, 2.2569916312056737, 0.0, 6.547834661835751, 9.027966524822695, 5.82701068627451, 4.880909613526569, 2.3148280113636366, 0.0), # 32
(4.493887715792838, 9.268774176136363, 7.3184206793478275, 3.8842696323529413, 2.2563623138297872, 0.0, 6.535910757121439, 9.025449255319149, 5.826404448529412, 4.878947119565218, 2.3171935440340907, 0.0), # 33
(4.503901150895141, 9.278105454545454, 7.314343623188405, 3.8837079738562093, 2.2554883687943263, 0.0, 6.521042112277196, 9.021953475177305, 5.825561960784314, 4.876229082125604, 2.3195263636363634, 0.0), # 34
(4.513697850063939, 9.287304687499997, 7.3091587409420296, 3.882991217320261, 2.2543738918439717, 0.0, 6.503315790021656, 9.017495567375887, 5.824486825980392, 4.872772493961353, 2.3218261718749993, 0.0), # 35
(4.523282512787724, 9.296370681818182, 7.302891521739131, 3.8821217647058828, 2.253022978723404, 0.0, 6.482818853073463, 9.012091914893617, 5.823182647058824, 4.868594347826087, 2.3240926704545455, 0.0), # 36
(4.532659838554988, 9.305302244318183, 7.295567454710145, 3.881102017973856, 2.2514397251773044, 0.0, 6.4596383641512585, 9.005758900709218, 5.821653026960784, 4.86371163647343, 2.3263255610795457, 0.0), # 37
(4.5418345268542195, 9.314098181818181, 7.287212028985508, 3.8799343790849674, 2.249628226950355, 0.0, 6.433861385973679, 8.99851290780142, 5.819901568627452, 4.858141352657005, 2.3285245454545453, 0.0), # 38
(4.5508112771739135, 9.322757301136363, 7.277850733695652, 3.87862125, 2.247592579787234, 0.0, 6.40557498125937, 8.990370319148935, 5.817931875, 4.8519004891304345, 2.330689325284091, 0.0), # 39
(4.559594789002558, 9.33127840909091, 7.267509057971015, 3.8771650326797387, 2.245336879432624, 0.0, 6.37486621272697, 8.981347517730496, 5.815747549019608, 4.845006038647344, 2.3328196022727274, 0.0), # 40
(4.568189761828645, 9.3396603125, 7.256212490942029, 3.8755681290849675, 2.2428652216312055, 0.0, 6.34182214309512, 8.971460886524822, 5.813352193627452, 4.837474993961353, 2.334915078125, 0.0), # 41
(4.576600895140665, 9.34790181818182, 7.2439865217391315, 3.8738329411764707, 2.2401817021276598, 0.0, 6.3065298350824595, 8.960726808510639, 5.810749411764706, 4.829324347826088, 2.336975454545455, 0.0), # 42
(4.584832888427111, 9.356001732954544, 7.230856639492753, 3.8719618709150327, 2.2372904166666667, 0.0, 6.26907635140763, 8.949161666666667, 5.80794280637255, 4.820571092995169, 2.339000433238636, 0.0), # 43
(4.592890441176471, 9.363958863636363, 7.216848333333333, 3.8699573202614377, 2.2341954609929076, 0.0, 6.229548754789272, 8.93678184397163, 5.804935980392157, 4.811232222222222, 2.3409897159090907, 0.0), # 44
(4.600778252877237, 9.371772017045453, 7.201987092391306, 3.8678216911764705, 2.230900930851064, 0.0, 6.188034107946028, 8.923603723404256, 5.801732536764706, 4.80132472826087, 2.3429430042613633, 0.0), # 45
(4.6085010230179035, 9.379440000000002, 7.186298405797103, 3.8655573856209147, 2.2274109219858156, 0.0, 6.144619473596536, 8.909643687943262, 5.798336078431372, 4.790865603864735, 2.3448600000000006, 0.0), # 46
(4.616063451086957, 9.386961619318182, 7.16980776268116, 3.8631668055555552, 2.223729530141844, 0.0, 6.099391914459438, 8.894918120567375, 5.794750208333333, 4.77987184178744, 2.3467404048295455, 0.0), # 47
(4.623470236572891, 9.394335681818182, 7.152540652173913, 3.8606523529411763, 2.21986085106383, 0.0, 6.052438493253375, 8.87944340425532, 5.790978529411765, 4.7683604347826085, 2.3485839204545456, 0.0), # 48
(4.630726078964194, 9.401560994318181, 7.134522563405797, 3.8580164297385626, 2.2158089804964543, 0.0, 6.003846272696985, 8.863235921985817, 5.787024644607844, 4.7563483756038645, 2.3503902485795454, 0.0), # 49
(4.6378356777493615, 9.408636363636361, 7.115778985507247, 3.8552614379084966, 2.211578014184397, 0.0, 5.953702315508913, 8.846312056737588, 5.782892156862745, 4.743852657004831, 2.3521590909090904, 0.0), # 50
(4.6448037324168805, 9.415560596590907, 7.096335407608696, 3.852389779411765, 2.2071720478723407, 0.0, 5.902093684407797, 8.828688191489363, 5.778584669117648, 4.73089027173913, 2.353890149147727, 0.0), # 51
(4.651634942455243, 9.4223325, 7.0762173188405795, 3.84940385620915, 2.2025951773049646, 0.0, 5.849107442112278, 8.810380709219858, 5.774105784313726, 4.717478212560386, 2.355583125, 0.0), # 52
(4.658334007352941, 9.428950880681818, 7.055450208333333, 3.8463060702614382, 2.1978514982269504, 0.0, 5.794830651340996, 8.791405992907801, 5.769459105392158, 4.703633472222222, 2.3572377201704544, 0.0), # 53
(4.6649056265984665, 9.435414545454544, 7.034059565217391, 3.843098823529412, 2.192945106382979, 0.0, 5.739350374812594, 8.771780425531915, 5.764648235294119, 4.689373043478261, 2.358853636363636, 0.0), # 54
(4.671354499680307, 9.441722301136364, 7.012070878623187, 3.8397845179738566, 2.1878800975177306, 0.0, 5.682753675245711, 8.751520390070922, 5.759676776960785, 4.674713919082125, 2.360430575284091, 0.0), # 55
(4.677685326086957, 9.447872954545453, 6.989509637681159, 3.8363655555555556, 2.1826605673758865, 0.0, 5.625127615358988, 8.730642269503546, 5.754548333333334, 4.65967309178744, 2.361968238636363, 0.0), # 56
(4.683902805306906, 9.453865312500001, 6.966401331521738, 3.832844338235294, 2.1772906117021273, 0.0, 5.566559257871065, 8.70916244680851, 5.749266507352941, 4.644267554347826, 2.3634663281250003, 0.0), # 57
(4.690011636828645, 9.459698181818181, 6.942771449275362, 3.8292232679738563, 2.1717743262411346, 0.0, 5.507135665500583, 8.687097304964539, 5.743834901960785, 4.628514299516908, 2.3649245454545453, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# initial entropy
entropy = 258194110137029475889902652135037600173
# index for seed sequence child
child_seed_index = (
1, # 0
59, # 1
)
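# --- usage sketch (illustrative assumption, not part of the generated data) --
# Per the numpy parallel-RNG guide linked above, the entropy value seeds a
# root SeedSequence and each entry of child_seed_index picks one spawned
# child stream; the guard keeps importing this instance file side-effect free.
if __name__ == "__main__":
    import numpy as np

    root = np.random.SeedSequence(entropy)
    children = root.spawn(max(child_seed_index) + 1)  # 60 child sequences
    rng = np.random.default_rng(children[child_seed_index[0]])
    print(rng.random(3))  # reproducible draws from child stream 1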
| 113.116418
| 212
| 0.729139
| 5,147
| 37,894
| 5.366038
| 0.228483
| 0.312828
| 0.247656
| 0.469242
| 0.32847
| 0.327745
| 0.327745
| 0.327745
| 0.327745
| 0.327745
| 0
| 0.819053
| 0.119122
| 37,894
| 334
| 213
| 113.45509
| 0.008358
| 0.031958
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4f430c48f066f2bb72aee9f7b0be54914c560b6
| 26,491
|
py
|
Python
|
monitoring/tests/system/test_vpcsc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 1
|
2019-06-14T10:11:59.000Z
|
2019-06-14T10:11:59.000Z
|
monitoring/tests/system/test_vpcsc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | null | null | null |
monitoring/tests/system/test_vpcsc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 1
|
2020-04-14T10:47:41.000Z
|
2020-04-14T10:47:41.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT MODIFY! AUTO-GENERATED!
# This file is auto-generated on 2019-05-03.
# flake8: noqa
import os
import pytest
from google.api_core import exceptions
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
PROJECT_INSIDE = os.environ.get("PROJECT_ID", None)
PROJECT_OUTSIDE = os.environ.get(
"GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT", None
)
IS_INSIDE_VPCSC = os.environ.get("GOOGLE_CLOUD_TESTS_IN_VPCSC", "false")
class TestVPCServiceControlV3(object):
@staticmethod
def _is_rejected(call):
try:
responses = call()
# If we reach this line, then call() did not raise. The return
# result must be either a google.api_core.page_iterator.Iterator
# instance, or None.
list(responses)
except exceptions.PermissionDenied as e:
return e.message == "Request is prohibited by organization's policy"
except:
pass
return False
@staticmethod
def _do_test(delayed_inside, delayed_outside):
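        # Inside the VPC-SC perimeter the call against the outside project
        # must be rejected while the inside call succeeds; outside the
        # perimeter the expectations are inverted.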
if IS_INSIDE_VPCSC.lower() == "true":
assert TestVPCServiceControlV3._is_rejected(delayed_outside)
assert not (TestVPCServiceControlV3._is_rejected(delayed_inside))
else:
assert not (TestVPCServiceControlV3._is_rejected(delayed_outside))
assert TestVPCServiceControlV3._is_rejected(delayed_inside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_alert_policy(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_alert_policy(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.delete_alert_policy(name_inside)
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.delete_alert_policy(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.get_alert_policy(name_inside)
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.get_alert_policy(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_alert_policies(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_alert_policies(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_alert_policies(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.update_alert_policy({"name": name_inside})
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.update_alert_policy({"name": name_outside})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_group(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_group(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.delete_group(name_inside)
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.delete_group(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.get_group(name_inside)
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.get_group(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_group_members(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_group_members(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_group_members(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_groups(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_groups(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_groups(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.update_group({"name": name_inside})
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.update_group({"name": name_outside})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_metric_descriptor(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_metric_descriptor(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_time_series(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_time_series(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_time_series(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.metric_descriptor_path(
PROJECT_INSIDE, "mock_metric_descriptor"
)
delayed_inside = lambda: client.delete_metric_descriptor(name_inside)
name_outside = client.metric_descriptor_path(
PROJECT_OUTSIDE, "mock_metric_descriptor"
)
delayed_outside = lambda: client.delete_metric_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.metric_descriptor_path(
PROJECT_INSIDE, "mock_metric_descriptor"
)
delayed_inside = lambda: client.get_metric_descriptor(name_inside)
name_outside = client.metric_descriptor_path(
PROJECT_OUTSIDE, "mock_metric_descriptor"
)
delayed_outside = lambda: client.get_metric_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_monitored_resource_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.monitored_resource_descriptor_path(
PROJECT_INSIDE, "mock_monitored_resource_descriptor"
)
delayed_inside = lambda: client.get_monitored_resource_descriptor(name_inside)
name_outside = client.monitored_resource_descriptor_path(
PROJECT_OUTSIDE, "mock_monitored_resource_descriptor"
)
delayed_outside = lambda: client.get_monitored_resource_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_metric_descriptors(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_metric_descriptors(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_metric_descriptors(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_monitored_resource_descriptors(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_monitored_resource_descriptors(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_monitored_resource_descriptors(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_time_series(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_time_series(
name_inside, "", {}, enums.ListTimeSeriesRequest.TimeSeriesView.FULL
)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_time_series(
name_outside, "", {}, enums.ListTimeSeriesRequest.TimeSeriesView.FULL
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_notification_channel(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_notification_channel(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.delete_notification_channel(name_inside)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.delete_notification_channel(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.get_notification_channel(name_inside)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.get_notification_channel(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_notification_channel_descriptor(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_descriptor_path(
PROJECT_INSIDE, "mock_notification_channel_descriptor"
)
delayed_inside = lambda: client.get_notification_channel_descriptor(name_inside)
name_outside = client.notification_channel_descriptor_path(
PROJECT_OUTSIDE, "mock_notification_channel_descriptor"
)
delayed_outside = lambda: client.get_notification_channel_descriptor(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_notification_channel_descriptors(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_notification_channel_descriptors(
name_inside
)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_notification_channel_descriptors(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_notification_channels(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_notification_channels(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_notification_channels(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.update_notification_channel(
{"name": name_inside}
)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.update_notification_channel(
{"name": name_outside}
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_uptime_check_config(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_uptime_check_config(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.delete_uptime_check_config(name_inside)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.delete_uptime_check_config(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.get_uptime_check_config(name_inside)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.get_uptime_check_config(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_uptime_check_configs(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_uptime_check_configs(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_uptime_check_configs(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.update_uptime_check_config(
{"name": name_inside}
)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.update_uptime_check_config(
{"name": name_outside}
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
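# A minimal sketch of the `_do_test` pattern that every test above relies on,
# assuming calls crossing the VPC Service Controls perimeter are rejected with
# `PermissionDenied`. The names below are illustrative stand-ins; the real
# helper is defined earlier in this test class.
from google.api_core.exceptions import PermissionDenied

def _is_rejected(call):
    """Return True when the delayed call is blocked by the service perimeter."""
    try:
        call()
    except PermissionDenied:
        return True
    except Exception:
        # The mock resources may fail for other reasons (e.g. NotFound);
        # only a perimeter rejection counts.
        return False
    return False

def _do_test_sketch(delayed_inside, delayed_outside):
    # The cross-perimeter call must be rejected by policy...
    assert _is_rejected(delayed_outside)
    # ...while the in-perimeter call may fail, but not with that rejection.
    assert not _is_rejected(delayed_inside)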
| 45.206485 | 98 | 0.730701 | 2,908 | 26,491 | 6.272352 | 0.060867 | 0.046327 | 0.054386 | 0.07818 | 0.929715 | 0.920724 | 0.882182 | 0.853728 | 0.850055 | 0.846107 | 0 | 0.004094 | 0.197803 | 26,491 | 585 | 99 | 45.283761 | 0.854219 | 0.030199 | 0 | 0.573614 | 0 | 0 | 0.178347 | 0.08372 | 0 | 0 | 0 | 0 | 0.007648 | 1 | 0.063098 | false | 0.001912 | 0.00956 | 0 | 0.078394 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
be0ca882a3db89c7a03c60e63dba3c68f62ed935 | 1,683 | py | Python | molsysmt/tests/pbc/box_lengths_from_box_vectors/test_box_lengths_from_box_vectors.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | ["MIT"] | null | null | null | molsysmt/tests/pbc/box_lengths_from_box_vectors/test_box_lengths_from_box_vectors.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | ["MIT"] | null | null | null | molsysmt/tests/pbc/box_lengths_from_box_vectors/test_box_lengths_from_box_vectors.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | ["MIT"] | null | null | null |
"""
Unit and regression test for the box_lengths_from_box_vectors module of the molsysmt package.
"""
# Import package, test suite, and other packages as needed
import molsysmt as msm
import numpy as np
# Box lengths recovered from box vectors for different solvation box geometries
def test_box_lengths_from_box_vectors_1():
molsys = msm.convert(msm.demo['Met-enkephalin']['vacuum.msmpk'], to_form='molsysmt.MolSys')
molsys = msm.build.solvate(molsys, box_geometry='cubic', clearance='14.0 angstroms', engine='PDBFixer')
box = msm.get(molsys, target='system', box=True)
lengths = msm.pbc.box_lengths_from_box_vectors(box)
check = np.allclose(msm.puw.get_value(lengths, to_unit='nm'), [[3.1236, 3.1236, 3.1236]])
assert check
def test_box_lengths_from_box_vectors_2():
molsys = msm.convert(msm.demo['Met-enkephalin']['vacuum.msmpk'], to_form='molsysmt.MolSys')
molsys = msm.build.solvate(molsys, box_geometry='truncated octahedral', clearance='14.0 angstroms', engine='PDBFixer')
box = msm.get(molsys, target='system', box=True)
lengths = msm.pbc.box_lengths_from_box_vectors(box)
check = np.allclose(msm.puw.get_value(lengths, to_unit='nm'), [[3.1236, 3.1236, 3.1236]])
assert check
def test_box_lengths_from_box_vectors_3():
molsys = msm.convert(msm.demo['Met-enkephalin']['vacuum.msmpk'], to_form='molsysmt.MolSys')
molsys = msm.build.solvate(molsys, box_geometry='rhombic dodecahedral', clearance='14.0 angstroms', engine='PDBFixer')
box = msm.get(molsys, target='system', box=True)
lengths = msm.pbc.box_lengths_from_box_vectors(box)
check = np.allclose(msm.puw.get_value(lengths, to_unit='nm'), [[3.1236, 3.1236, 3.1236]])
assert check
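# The behaviour exercised above reduces to simple linear algebra: each box
# length is the Euclidean norm of the corresponding box vector. A minimal
# NumPy reference (an illustration, not molsysmt's implementation):
import numpy as np

def box_lengths_reference(box):
    """box: array of shape (n_frames, 3, 3) of box vectors; returns (n_frames, 3)."""
    return np.linalg.norm(box, axis=-1)

# For a cubic box of edge 3.1236 nm all three lengths coincide, matching the
# values asserted in the tests above:
cubic = np.diag([3.1236, 3.1236, 3.1236])[np.newaxis, ...]
assert np.allclose(box_lengths_reference(cubic), [[3.1236, 3.1236, 3.1236]])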
| 48.085714 | 122 | 0.729649 | 255 | 1,683 | 4.635294 | 0.266667 | 0.038071 | 0.08291 | 0.100677 | 0.819797 | 0.799492 | 0.799492 | 0.773266 | 0.773266 | 0.773266 | 0 | 0.038697 | 0.124777 | 1,683 | 34 | 123 | 49.5 | 0.763747 | 0.114082 | 0 | 0.652174 | 0 | 0 | 0.174207 | 0 | 0 | 0 | 0 | 0 | 0.130435 | 1 | 0.130435 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
be20bf4c814f553043f910730684ae7fe8f2151e | 46,629 | py | Python | colour/plotting/diagrams.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | ["BSD-3-Clause"] | null | null | null | colour/plotting/diagrams.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | ["BSD-3-Clause"] | null | null | null | colour/plotting/diagrams.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | ["BSD-3-Clause"] | null | null | null |
"""
CIE Chromaticity Diagrams Plotting
==================================
Defines the *CIE* chromaticity diagrams plotting objects:
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1976UCS`
"""
from __future__ import annotations
import bisect
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
from matplotlib.patches import Polygon
from colour.algebra import normalise_maximum, normalise_vector
from colour.colorimetry import (
MultiSpectralDistributions,
SDS_ILLUMINANTS,
SpectralDistribution,
sd_to_XYZ,
sds_and_msds_to_sds,
)
from colour.hints import (
Any,
ArrayLike,
Boolean,
Callable,
Dict,
Floating,
Integer,
List,
Literal,
NDArray,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from colour.models import (
Luv_to_uv,
Luv_uv_to_xy,
UCS_to_uv,
UCS_uv_to_xy,
XYZ_to_Luv,
XYZ_to_UCS,
XYZ_to_xy,
xy_to_XYZ,
)
from colour.notation import HEX_to_RGB
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
CONSTANTS_ARROW_STYLE,
XYZ_to_plotting_colourspace,
artist,
filter_cmfs,
filter_illuminants,
override_style,
render,
update_settings_collection,
)
from colour.utilities import (
as_float_array,
domain_range_scale,
first_item,
is_string,
optional,
tsplit,
tstack,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"plot_spectral_locus",
"plot_chromaticity_diagram_colours",
"plot_chromaticity_diagram",
"plot_chromaticity_diagram_CIE1931",
"plot_chromaticity_diagram_CIE1960UCS",
"plot_chromaticity_diagram_CIE1976UCS",
"plot_sds_in_chromaticity_diagram",
"plot_sds_in_chromaticity_diagram_CIE1931",
"plot_sds_in_chromaticity_diagram_CIE1960UCS",
"plot_sds_in_chromaticity_diagram_CIE1976UCS",
]
@override_style()
def plot_spectral_locus(
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
spectral_locus_colours: Optional[Union[ArrayLike, str]] = None,
spectral_locus_opacity: Floating = 1,
spectral_locus_labels: Optional[Sequence] = None,
method: Union[
Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
] = "CIE 1931",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *Spectral Locus* according to given method.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
spectral_locus_colours
Colours of the *Spectral Locus*, if ``spectral_locus_colours`` is set
to *RGB*, the colours will be computed according to the corresponding
chromaticity coordinates.
spectral_locus_opacity
Opacity of the *Spectral Locus*.
spectral_locus_labels
Array of wavelength labels used to customise which labels will be drawn
around the spectral locus. Passing an empty array will result in no
wavelength labels being drawn.
method
*Chromaticity Diagram* method.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_spectral_locus(spectral_locus_colours='RGB') # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Spectral_Locus.png
:align: center
:alt: plot_spectral_locus
"""
method = validate_method(
method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
)
spectral_locus_colours = optional(
spectral_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark
)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
cmfs = cast(
MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
)
illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint
wavelengths = list(cmfs.wavelengths)
equal_energy = np.array([1 / 3] * 2)
if method == "cie 1931":
ij = XYZ_to_xy(cmfs.values, illuminant)
labels = cast(
Tuple,
optional(
spectral_locus_labels,
(
390,
460,
470,
480,
490,
500,
510,
520,
540,
560,
580,
600,
620,
700,
),
),
)
elif method == "cie 1960 ucs":
ij = UCS_to_uv(XYZ_to_UCS(cmfs.values))
labels = cast(
Tuple,
optional(
spectral_locus_labels,
(
420,
440,
450,
460,
470,
480,
490,
500,
510,
520,
530,
540,
550,
560,
570,
580,
590,
600,
610,
620,
630,
645,
680,
),
),
)
elif method == "cie 1976 ucs":
ij = Luv_to_uv(XYZ_to_Luv(cmfs.values, illuminant), illuminant)
labels = cast(
Tuple,
optional(
spectral_locus_labels,
(
420,
440,
450,
460,
470,
480,
490,
500,
510,
520,
530,
540,
550,
560,
570,
580,
590,
600,
610,
620,
630,
645,
680,
),
),
)
pl_ij = np.reshape(
tstack(
[
np.linspace(ij[0][0], ij[-1][0], 20),
np.linspace(ij[0][1], ij[-1][1], 20),
]
),
(-1, 1, 2),
)
sl_ij = np.copy(ij).reshape(-1, 1, 2)
purple_line_colours: Optional[Union[ArrayLike, str]]
if str(spectral_locus_colours).upper() == "RGB":
spectral_locus_colours = normalise_maximum(
XYZ_to_plotting_colourspace(cmfs.values), axis=-1
)
if method == "cie 1931":
XYZ = xy_to_XYZ(pl_ij)
elif method == "cie 1960 ucs":
XYZ = xy_to_XYZ(UCS_uv_to_xy(pl_ij))
elif method == "cie 1976 ucs":
XYZ = xy_to_XYZ(Luv_uv_to_xy(pl_ij))
purple_line_colours = normalise_maximum(
XYZ_to_plotting_colourspace(np.reshape(XYZ, (-1, 3))), axis=-1
)
else:
purple_line_colours = spectral_locus_colours
for slp_ij, slp_colours in (
(pl_ij, purple_line_colours),
(sl_ij, spectral_locus_colours),
):
line_collection = LineCollection(
np.concatenate([slp_ij[:-1], slp_ij[1:]], axis=1),
colors=slp_colours,
alpha=spectral_locus_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_scatter,
)
axes.add_collection(line_collection)
wl_ij = dict(zip(wavelengths, ij))
for label in labels:
ij_l = wl_ij.get(label)
if ij_l is None:
continue
ij_l = as_float_array([ij_l])
i, j = tsplit(ij_l)
index = bisect.bisect(wavelengths, label)
left = wavelengths[index - 1] if index >= 0 else wavelengths[index]
right = (
wavelengths[index] if index < len(wavelengths) else wavelengths[-1]
)
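        # Estimate the locus tangent at the labelled wavelength from its two
        # neighbouring samples, then pick the perpendicular pointing away from
        # the equal-energy point so the label lands outside the locus.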
dx = wl_ij[right][0] - wl_ij[left][0]
dy = wl_ij[right][1] - wl_ij[left][1]
direction = np.array([-dy, dx])
normal = (
np.array([-dy, dx])
if np.dot(
normalise_vector(ij_l - equal_energy),
normalise_vector(direction),
)
> 0
else np.array([dy, -dx])
)
normal = as_float_array(normalise_vector(normal) / 30)
label_colour = (
spectral_locus_colours
if is_string(spectral_locus_colours)
else spectral_locus_colours[index] # type: ignore[index]
)
axes.plot(
(i, i + normal[0] * 0.75),
(j, j + normal[1] * 0.75),
color=label_colour,
alpha=spectral_locus_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
)
axes.plot(
i,
j,
"o",
color=label_colour,
alpha=spectral_locus_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
)
axes.text(
i + normal[0],
j + normal[1],
label,
clip_on=True,
ha="left" if normal[0] >= 0 else "right",
va="center",
fontdict={"size": "small"},
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_label,
)
settings = {"axes": axes}
settings.update(kwargs)
    return render(**settings)
@override_style()
def plot_chromaticity_diagram_colours(
samples: Integer = 256,
diagram_colours: Optional[Union[ArrayLike, str]] = None,
diagram_opacity: Floating = 1,
diagram_clipping_path: Optional[ArrayLike] = None,
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
method: Union[
Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
] = "CIE 1931",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *Chromaticity Diagram* colours according to given method.
Parameters
----------
samples
Samples count on one axis when computing the *Chromaticity Diagram*
colours.
diagram_colours
Colours of the *Chromaticity Diagram*, if ``diagram_colours`` is set
to *RGB*, the colours will be computed according to the corresponding
coordinates.
diagram_opacity
Opacity of the *Chromaticity Diagram*.
diagram_clipping_path
Path of points used to clip the *Chromaticity Diagram* colours.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
method
*Chromaticity Diagram* method.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_chromaticity_diagram_colours(diagram_colours='RGB')
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_Colours.png
:align: center
:alt: plot_chromaticity_diagram_colours
"""
method = validate_method(
method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
diagram_colours = cast(
ArrayLike,
optional(
diagram_colours, HEX_to_RGB(CONSTANTS_COLOUR_STYLE.colour.average)
),
)
cmfs = cast(
MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
)
illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint
if method == "cie 1931":
spectral_locus = XYZ_to_xy(cmfs.values, illuminant)
elif method == "cie 1960 ucs":
spectral_locus = UCS_to_uv(XYZ_to_UCS(cmfs.values))
elif method == "cie 1976 ucs":
spectral_locus = Luv_to_uv(
XYZ_to_Luv(cmfs.values, illuminant), illuminant
)
use_RGB_diagram_colours = str(diagram_colours).upper() == "RGB"
if use_RGB_diagram_colours:
ii, jj = np.meshgrid(
np.linspace(0, 1, samples), np.linspace(1, 0, samples)
)
ij = tstack([ii, jj])
if method == "cie 1931":
XYZ = xy_to_XYZ(ij)
elif method == "cie 1960 ucs":
XYZ = xy_to_XYZ(UCS_uv_to_xy(ij))
elif method == "cie 1976 ucs":
XYZ = xy_to_XYZ(Luv_uv_to_xy(ij))
diagram_colours = normalise_maximum(
XYZ_to_plotting_colourspace(XYZ, illuminant), axis=-1
)
polygon = Polygon(
spectral_locus
if diagram_clipping_path is None
else diagram_clipping_path,
facecolor="none"
if use_RGB_diagram_colours
else np.hstack([diagram_colours, diagram_opacity]),
edgecolor="none"
if use_RGB_diagram_colours
else np.hstack([diagram_colours, diagram_opacity]),
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
)
axes.add_patch(polygon)
if use_RGB_diagram_colours:
# Preventing bounding box related issues as per
# https://github.com/matplotlib/matplotlib/issues/10529
image = axes.imshow(
diagram_colours,
interpolation="bilinear",
extent=(0, 1, 0, 1),
clip_path=None,
alpha=diagram_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
)
image.set_clip_path(polygon)
settings = {"axes": axes}
settings.update(kwargs)
    return render(**settings)
@override_style()
def plot_chromaticity_diagram(
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
show_diagram_colours: Boolean = True,
show_spectral_locus: Boolean = True,
method: Union[
Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
] = "CIE 1931",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *Chromaticity Diagram* according to given method.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
show_diagram_colours
Whether to display the *Chromaticity Diagram* background colours.
show_spectral_locus
Whether to display the *Spectral Locus*.
method
*Chromaticity Diagram* method.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_spectral_locus`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram_colours`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_chromaticity_diagram() # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Chromaticity_Diagram.png
:align: center
:alt: plot_chromaticity_diagram
"""
method = validate_method(
method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
cmfs = cast(
MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
)
if show_diagram_colours:
settings = {"axes": axes, "method": method, "diagram_colours": "RGB"}
settings.update(kwargs)
settings["standalone"] = False
settings["cmfs"] = cmfs
plot_chromaticity_diagram_colours(**settings)
if show_spectral_locus:
settings = {"axes": axes, "method": method}
settings.update(kwargs)
settings["standalone"] = False
settings["cmfs"] = cmfs
plot_spectral_locus(**settings)
if method == "cie 1931":
x_label, y_label = "CIE x", "CIE y"
elif method == "cie 1960 ucs":
x_label, y_label = "CIE u", "CIE v"
elif method == "cie 1976 ucs":
x_label, y_label = (
"CIE u'",
"CIE v'",
)
title = f"{method.upper()} Chromaticity Diagram - {cmfs.strict_name}"
settings.update(
{
"axes": axes,
"standalone": True,
"bounding_box": (0, 1, 0, 1),
"title": title,
"x_label": x_label,
"y_label": y_label,
}
)
settings.update(kwargs)
return render(**settings)
@override_style()
def plot_chromaticity_diagram_CIE1931(
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
show_diagram_colours: Boolean = True,
show_spectral_locus: Boolean = True,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *CIE 1931 Chromaticity Diagram*.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
show_diagram_colours
Whether to display the *Chromaticity Diagram* background colours.
show_spectral_locus
Whether to display the *Spectral Locus*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_chromaticity_diagram_CIE1931() # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1931.png
:align: center
:alt: plot_chromaticity_diagram_CIE1931
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1931"})
return plot_chromaticity_diagram(
cmfs, show_diagram_colours, show_spectral_locus, **settings
)
@override_style()
def plot_chromaticity_diagram_CIE1960UCS(
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
show_diagram_colours: Boolean = True,
show_spectral_locus: Boolean = True,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *CIE 1960 UCS Chromaticity Diagram*.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
show_diagram_colours
Whether to display the *Chromaticity Diagram* background colours.
show_spectral_locus
Whether to display the *Spectral Locus*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_chromaticity_diagram_CIE1960UCS() # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png
:align: center
:alt: plot_chromaticity_diagram_CIE1960UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1960 UCS"})
return plot_chromaticity_diagram(
cmfs, show_diagram_colours, show_spectral_locus, **settings
)
@override_style()
def plot_chromaticity_diagram_CIE1976UCS(
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
show_diagram_colours: Boolean = True,
show_spectral_locus: Boolean = True,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot the *CIE 1976 UCS Chromaticity Diagram*.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
show_diagram_colours
Whether to display the *Chromaticity Diagram* background colours.
show_spectral_locus
Whether to display the *Spectral Locus*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_chromaticity_diagram_CIE1976UCS() # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png
:align: center
:alt: plot_chromaticity_diagram_CIE1976UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1976 UCS"})
return plot_chromaticity_diagram(
cmfs, show_diagram_colours, show_spectral_locus, **settings
)
@override_style()
def plot_sds_in_chromaticity_diagram(
sds: Union[
Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
MultiSpectralDistributions,
],
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable: Callable = plot_chromaticity_diagram,
method: Union[
Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
] = "CIE 1931",
annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given spectral distribution chromaticity coordinates into the
*Chromaticity Diagram* using given method.
Parameters
----------
sds
Spectral distributions or multi-spectral distributions to
        plot. ``sds`` can be a single
:class:`colour.MultiSpectralDistributions` class instance, a list
of :class:`colour.MultiSpectralDistributions` class instances or a
list of :class:`colour.SpectralDistribution` class instances.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
chromaticity_diagram_callable
Callable responsible for drawing the *Chromaticity Diagram*.
method
*Chromaticity Diagram* method.
annotate_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.annotate`
definition, used to annotate the resulting chromaticity coordinates
with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with the same
settings or a sequence of dictionaries with different settings for each
spectral distribution. The following special keyword arguments can also
be used:
- ``annotate`` : Whether to annotate the spectral distributions.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted spectral distributions.
        ``plot_kwargs`` can be either a single dictionary applied to all the
plotted spectral distributions with the same settings or a sequence of
dictionaries with different settings for each plotted spectral
        distribution. The following special keyword arguments can also be
used:
- ``illuminant`` : The illuminant used to compute the spectral
distributions colours. The default is the illuminant associated
with the whitepoint of the default plotting colourspace.
``illuminant`` can be of any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``cmfs`` : The standard observer colour matching functions used for
computing the spectral distributions colours. ``cmfs`` can be of
any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``normalise_sd_colours`` : Whether to normalise the computed
spectral distributions colours. The default is *True*.
- ``use_sd_colours`` : Whether to use the computed spectral
distributions colours under the plotting colourspace illuminant.
Alternatively, it is possible to use the
:func:`matplotlib.pyplot.plot` definition ``color`` argument with
pre-computed values. The default is *True*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> A = SDS_ILLUMINANTS['A']
>>> D65 = SDS_ILLUMINANTS['D65']
>>> annotate_kwargs = [
... {'xytext': (-25, 15), 'arrowprops':{'arrowstyle':'-'}},
... {}
... ]
>>> plot_kwargs = [
... {
... 'illuminant': SDS_ILLUMINANTS['E'],
... 'markersize' : 15,
... 'normalise_sd_colours': True,
... 'use_sd_colours': True
... },
... {'illuminant': SDS_ILLUMINANTS['E']},
... ]
>>> plot_sds_in_chromaticity_diagram(
... [A, D65], annotate_kwargs=annotate_kwargs, plot_kwargs=plot_kwargs)
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_SDS_In_Chromaticity_Diagram.png
:align: center
:alt: plot_sds_in_chromaticity_diagram
"""
method = validate_method(
method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
)
sds_converted = sds_and_msds_to_sds(sds)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
settings.update(
{
"axes": axes,
"standalone": False,
"method": method,
"cmfs": cmfs,
}
)
chromaticity_diagram_callable(**settings)
if method == "cie 1931":
def XYZ_to_ij(XYZ: NDArray) -> NDArray:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return XYZ_to_xy(XYZ)
bounding_box = (-0.1, 0.9, -0.1, 0.9)
elif method == "cie 1960 ucs":
def XYZ_to_ij(XYZ: NDArray) -> NDArray:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return UCS_to_uv(XYZ_to_UCS(XYZ))
bounding_box = (-0.1, 0.7, -0.2, 0.6)
elif method == "cie 1976 ucs":
def XYZ_to_ij(XYZ: NDArray) -> NDArray:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return Luv_to_uv(XYZ_to_Luv(XYZ))
bounding_box = (-0.1, 0.7, -0.1, 0.7)
annotate_settings_collection = [
{
"annotate": True,
"xytext": (-50, 30),
"textcoords": "offset points",
"arrowprops": CONSTANTS_ARROW_STYLE,
"zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_annotation,
}
for _ in range(len(sds_converted))
]
if annotate_kwargs is not None:
update_settings_collection(
annotate_settings_collection, annotate_kwargs, len(sds_converted)
)
plot_settings_collection = [
{
"color": CONSTANTS_COLOUR_STYLE.colour.brightest,
"label": f"{sd.strict_name}",
"marker": "o",
"markeredgecolor": CONSTANTS_COLOUR_STYLE.colour.dark,
"markeredgewidth": CONSTANTS_COLOUR_STYLE.geometry.short * 0.75,
"markersize": (
CONSTANTS_COLOUR_STYLE.geometry.short * 6
+ CONSTANTS_COLOUR_STYLE.geometry.short * 0.75
),
"zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_line,
"cmfs": cmfs,
"illuminant": SDS_ILLUMINANTS[
CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint_name
],
"use_sd_colours": False,
"normalise_sd_colours": False,
}
for sd in sds_converted
]
if plot_kwargs is not None:
update_settings_collection(
plot_settings_collection, plot_kwargs, len(sds_converted)
)
for i, sd in enumerate(sds_converted):
plot_settings = plot_settings_collection[i]
cmfs = cast(
MultiSpectralDistributions,
first_item(filter_cmfs(plot_settings.pop("cmfs")).values()),
)
illuminant = cast(
SpectralDistribution,
first_item(
filter_illuminants(plot_settings.pop("illuminant")).values()
),
)
normalise_sd_colours = plot_settings.pop("normalise_sd_colours")
use_sd_colours = plot_settings.pop("use_sd_colours")
with domain_range_scale("1"):
XYZ = sd_to_XYZ(sd, cmfs, illuminant)
if use_sd_colours:
if normalise_sd_colours:
XYZ /= XYZ[..., 1]
plot_settings["color"] = np.clip(
XYZ_to_plotting_colourspace(XYZ), 0, 1
)
ij = XYZ_to_ij(XYZ)
axes.plot(ij[0], ij[1], **plot_settings)
if sd.name is not None and annotate_settings_collection[i]["annotate"]:
annotate_settings = annotate_settings_collection[i]
annotate_settings.pop("annotate")
axes.annotate(sd.name, xy=ij, **annotate_settings)
settings.update({"standalone": True, "bounding_box": bounding_box})
settings.update(kwargs)
return render(**settings)
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1931(
sds: Union[
Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
MultiSpectralDistributions,
],
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1931: Callable = (
plot_chromaticity_diagram_CIE1931
),
annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given spectral distribution chromaticity coordinates into the
*CIE 1931 Chromaticity Diagram*.
Parameters
----------
sds
Spectral distributions or multi-spectral distributions to
        plot. ``sds`` can be a single :class:`colour.MultiSpectralDistributions`
class instance, a list of :class:`colour.MultiSpectralDistributions`
class instances or a list of :class:`colour.SpectralDistribution` class
instances.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1931
Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
annotate_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.annotate`
definition, used to annotate the resulting chromaticity coordinates
with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with the same
settings or a sequence of dictionaries with different settings for each
spectral distribution. The following special keyword arguments can also
be used:
- ``annotate`` : Whether to annotate the spectral distributions.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted spectral distributions.
        ``plot_kwargs`` can be either a single dictionary applied to all the
plotted spectral distributions with the same settings or a sequence of
dictionaries with different settings for each plotted spectral
        distribution. The following special keyword arguments can also be
used:
- ``illuminant`` : The illuminant used to compute the spectral
distributions colours. The default is the illuminant associated
with the whitepoint of the default plotting colourspace.
``illuminant`` can be of any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``cmfs`` : The standard observer colour matching functions used for
computing the spectral distributions colours. ``cmfs`` can be of
any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``normalise_sd_colours`` : Whether to normalise the computed
spectral distributions colours. The default is *True*.
- ``use_sd_colours`` : Whether to use the computed spectral
distributions colours under the plotting colourspace illuminant.
Alternatively, it is possible to use the
:func:`matplotlib.pyplot.plot` definition ``color`` argument with
pre-computed values. The default is *True*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> A = SDS_ILLUMINANTS['A']
>>> D65 = SDS_ILLUMINANTS['D65']
>>> plot_sds_in_chromaticity_diagram_CIE1931([A, D65])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1931.png
:align: center
:alt: plot_sds_in_chromaticity_diagram_CIE1931
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1931"})
return plot_sds_in_chromaticity_diagram(
sds,
cmfs,
chromaticity_diagram_callable_CIE1931,
annotate_kwargs=annotate_kwargs,
plot_kwargs=plot_kwargs,
**settings,
)
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1960UCS(
sds: Union[
Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
MultiSpectralDistributions,
],
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1960UCS: Callable = (
plot_chromaticity_diagram_CIE1960UCS
),
annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given spectral distribution chromaticity coordinates into the
*CIE 1960 UCS Chromaticity Diagram*.
Parameters
----------
sds
Spectral distributions or multi-spectral distributions to
        plot. ``sds`` can be a single :class:`colour.MultiSpectralDistributions`
class instance, a list of :class:`colour.MultiSpectralDistributions`
class instances or a list of :class:`colour.SpectralDistribution` class
instances.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1960UCS
Callable responsible for drawing the
*CIE 1960 UCS Chromaticity Diagram*.
annotate_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.annotate`
definition, used to annotate the resulting chromaticity coordinates
with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with the same
settings or a sequence of dictionaries with different settings for each
spectral distribution. The following special keyword arguments can also
be used:
- ``annotate`` : Whether to annotate the spectral distributions.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted spectral distributions.
        ``plot_kwargs`` can be either a single dictionary applied to all the
plotted spectral distributions with the same settings or a sequence of
dictionaries with different settings for each plotted spectral
        distribution. The following special keyword arguments can also be
used:
- ``illuminant`` : The illuminant used to compute the spectral
distributions colours. The default is the illuminant associated
with the whitepoint of the default plotting colourspace.
``illuminant`` can be of any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``cmfs`` : The standard observer colour matching functions used for
computing the spectral distributions colours. ``cmfs`` can be of
any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``normalise_sd_colours`` : Whether to normalise the computed
spectral distributions colours. The default is *True*.
- ``use_sd_colours`` : Whether to use the computed spectral
distributions colours under the plotting colourspace illuminant.
Alternatively, it is possible to use the
:func:`matplotlib.pyplot.plot` definition ``color`` argument with
pre-computed values. The default is *True*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> A = SDS_ILLUMINANTS['A']
>>> D65 = SDS_ILLUMINANTS['D65']
>>> plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1960UCS.png
:align: center
:alt: plot_sds_in_chromaticity_diagram_CIE1960UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1960 UCS"})
return plot_sds_in_chromaticity_diagram(
sds,
cmfs,
chromaticity_diagram_callable_CIE1960UCS,
annotate_kwargs=annotate_kwargs,
plot_kwargs=plot_kwargs,
**settings,
)
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1976UCS(
sds: Union[
Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
MultiSpectralDistributions,
],
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1976UCS: Callable = (
plot_chromaticity_diagram_CIE1976UCS
),
annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given spectral distribution chromaticity coordinates into the
*CIE 1976 UCS Chromaticity Diagram*.
Parameters
----------
sds
Spectral distributions or multi-spectral distributions to
        plot. ``sds`` can be a single :class:`colour.MultiSpectralDistributions`
class instance, a list of :class:`colour.MultiSpectralDistributions`
class instances or a list of :class:`colour.SpectralDistribution` class
instances.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1976UCS
Callable responsible for drawing the
*CIE 1976 UCS Chromaticity Diagram*.
annotate_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.annotate`
definition, used to annotate the resulting chromaticity coordinates
with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with the same
settings or a sequence of dictionaries with different settings for each
spectral distribution. The following special keyword arguments can also
be used:
- ``annotate`` : Whether to annotate the spectral distributions.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted spectral distributions.
        ``plot_kwargs`` can be either a single dictionary applied to all the
plotted spectral distributions with the same settings or a sequence of
dictionaries with different settings for each plotted spectral
        distribution. The following special keyword arguments can also be
used:
- ``illuminant`` : The illuminant used to compute the spectral
distributions colours. The default is the illuminant associated
with the whitepoint of the default plotting colourspace.
``illuminant`` can be of any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``cmfs`` : The standard observer colour matching functions used for
computing the spectral distributions colours. ``cmfs`` can be of
any type or form supported by the
:func:`colour.plotting.filter_cmfs` definition.
- ``normalise_sd_colours`` : Whether to normalise the computed
spectral distributions colours. The default is *True*.
- ``use_sd_colours`` : Whether to use the computed spectral
distributions colours under the plotting colourspace illuminant.
Alternatively, it is possible to use the
:func:`matplotlib.pyplot.plot` definition ``color`` argument with
pre-computed values. The default is *True*.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> A = SDS_ILLUMINANTS['A']
>>> D65 = SDS_ILLUMINANTS['D65']
>>> plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1976UCS.png
:align: center
:alt: plot_sds_in_chromaticity_diagram_CIE1976UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1976 UCS"})
return plot_sds_in_chromaticity_diagram(
sds,
cmfs,
chromaticity_diagram_callable_CIE1976UCS,
annotate_kwargs=annotate_kwargs,
plot_kwargs=plot_kwargs,
**settings,
)
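# A short usage sketch for the definitions above (an assumption-laden example,
# not part of the module: it requires an interactive matplotlib backend, and
# `SDS_ILLUMINANTS` is already imported at module level):
if __name__ == "__main__":
    figure, axes = plot_sds_in_chromaticity_diagram_CIE1976UCS(
        [SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]],
        plot_kwargs={"use_sd_colours": True},
    )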
| 33.282655 | 79 | 0.623732 | 4,997 | 46,629 | 5.639784 | 0.077046 | 0.072812 | 0.033851 | 0.019374 | 0.80523 | 0.76705 | 0.743276 | 0.720708 | 0.703215 | 0.688205 | 0 | 0.024573 | 0.279997 | 46,629 | 1,400 | 80 | 33.306429 | 0.814851 | 0.467713 | 0 | 0.502196 | 0 | 0 | 0.089522 | 0.01579 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019034 | false | 0 | 0.019034 | 0 | 0.057101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
be21eb7ff5a888f1e1b13930f8acecabfbd7184c | 1,345 | py | Python | search_insert_position.py | ChiragSaini/June-LeetCoding-Challenge | 8e3192c7c4cfbd5cf8718bdb1b041871585a0c69 | ["MIT"] | null | null | null | search_insert_position.py | ChiragSaini/June-LeetCoding-Challenge | 8e3192c7c4cfbd5cf8718bdb1b041871585a0c69 | ["MIT"] | null | null | null | search_insert_position.py | ChiragSaini/June-LeetCoding-Challenge | 8e3192c7c4cfbd5cf8718bdb1b041871585a0c69 | ["MIT"] | null | null | null |
from typing import List

########################
# * First Solution
########################
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
low = 0
high = len(nums)-1
while low < high:
mid = (low+high) // 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
high = mid-1
elif nums[mid] < target:
low = mid+1
for i in range(len(nums)):
if nums[i] >= target:
return i
return len(nums)
########################
# * Second Solution
########################
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
# ? Border Cases
if target > nums[-1]:
return len(nums)
if target <= nums[0]:
return 0
############
# * Binary Search here
low = 0
high = len(nums)-1
while low < high:
mid = (low+high) // 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
high = mid-1
elif nums[mid] < target:
low = mid+1
# * Simple Traversal here
for i in range(len(nums)):
if nums[i] >= target:
return i
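########################
# * Idiomatic alternative (illustrative, not one of the original solutions):
# * the standard library computes the insert position directly via
# * bisect_left, in O(log n) and without the linear fallback scan.
########################
from bisect import bisect_left

class SolutionBisect:
    def searchInsert(self, nums: List[int], target: int) -> int:
        # bisect_left returns the leftmost index at which target can be
        # inserted while keeping nums sorted, which is exactly the answer.
        return bisect_left(nums, target)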
| 29.23913 | 64 | 0.402974 | 140 | 1,345 | 3.871429 | 0.235714 | 0.077491 | 0.143911 | 0.125461 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0 | 0.01671 | 0.421561 | 1,345 | 46 | 65 | 29.23913 | 0.679949 | 0.069888 | 0 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
079c58dccf5598b4386d5a1d9775120393e99087 | 30 | py | Python | experta/matchers/__init__.py | Kirito56/ExpertaMadman | e14ab93e6e86ef942be3ee5487425a6f483f0dad | ["MIT"] | null | null | null | experta/matchers/__init__.py | Kirito56/ExpertaMadman | e14ab93e6e86ef942be3ee5487425a6f483f0dad | ["MIT"] | null | null | null | experta/matchers/__init__.py | Kirito56/ExpertaMadman | e14ab93e6e86ef942be3ee5487425a6f483f0dad | ["MIT"] | null | null | null |
from .rete import ReteMatcher
| 15 | 29 | 0.833333 | 4 | 30 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 30 | 1 | 30 | 30 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
07cd8c1d2f18d7e0e61d519f5e313de0291e736a | 25,729 | py | Python | fixture/testhelpersm.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | ["Apache-2.0"] | null | null | null | fixture/testhelpersm.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | ["Apache-2.0"] | null | null | null | fixture/testhelpersm.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | ["Apache-2.0"] | null | null | null |
import re
#import datetime
from random import randrange
import time
class testHelperSM:
def __init__(self, app):
self.app = app
# def find_region(self):
# wd = self.app.wd
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[2]/label")
# wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']")
# def find_region2(self, reg_name):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr/td[2]/div/div/div[1]/span[2]").click()
# wd.find_element_by_xpath("//div[@id='mCSB_6_container']/div/ul/li[20]/label").click()
# wd.find_element_by_id("aggSearchText").click()
# wd.find_element_by_id("aggSearchText").clear()
# wd.find_element_by_id("aggSearchText").send_keys("%s" % reg_name)
# wd.find_element_by_id("aggSearch").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/span[3]").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[7]/label").click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
# def find_region3(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# i = randrange(24)
# wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr[2]/td[1]/div/div/div[1]/span[2]").click()
# self.app.wait_sm_artefact_Block(10)
# if i > 0:
#element = wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i)
#ActionChains(wd).move_to_element(element).perform()
# wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(20)
# self.press_search_button()
# def find_in_container_number(self, range_container_numbers, container_number):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# spicok = []
# i = randrange(1, 4, 1)
# if container_number == 0:
# ct = randrange(1, range_container_numbers, 1)
# else:
# ct = container_number
# if not self.is_sm_advSearch_is_displayed():
# if len(wd.find_elements_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']")) < 2:
# wd.find_element_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']").click()
# else:
# wd.find_element_by_xpath("//div[@id='advSearch']/div[2]/a").click()
# if i > 0 and ct > 0:
# if ct == 1:
# if i < 3:
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# if i == 3:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 2:
# try:
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
# except:
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 3:
# wd.find_element_by_xpath("//div[@id='mCSB_3_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 4:
# wd.find_element_by_xpath("//div[@id='mCSB_4_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 5:
# wd.find_element_by_xpath("//div[@id='mCSB_5_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 6:
# wd.find_element_by_xpath("//div[@id='mCSB_6_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 7:
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 8:
# wd.find_element_by_xpath("//div[@id='mCSB_8_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 9:
# wd.find_element_by_xpath("//div[@id='mCSB_9_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 10:
# wd.find_element_by_xpath("//div[@id='mCSB_10_container']/ul/li[%s]/label" % str(i)).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
# self.press_search_button()
# return i, ct
def press_search_button(self):
wd = self.app.wd
wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']").click()
# def is_sm_advSearch_is_displayed(self):
# try:
# text = self.app.wd.find_element_by_id("advSearchContent").value_of_css_property("display")
# if text == 'block':
# return True
# except:
# return False
# def find_zakazchik_for_purchases_list(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# i = randrange(24)
# wd.find_element_by_xpath(
# "//div[@id='aggregatesPlaceholder']/table/tbody/tr[1]/td[3]/div[2]/div/div[1]/span[2]").click()
# self.app.wait_sm_artefact_Block(10)
# wd.find_element_by_id("aggSearchText").click()
# wd.find_element_by_id("aggSearchText").clear()
# wd.find_element_by_id("aggSearchText").send_keys("администрация")
# wd.find_element_by_id("aggSearch").click()
# self.app.wait_sm_artefact_Block(10)
# if i > 0:
# wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
    # ! does not work
# def search_in_opened_container(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# if not self.is_sm_advSearch_is_displayed():
# if len(wd.find_elements_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']")) < 2:
# wd.find_element_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']").click()
# else:
# wd.find_element_by_xpath("//div[@id='advSearch']/div[2]/a").click()
# i = randrange(1, 24, 1)
# c = len(wd.find_elements_by_css_selector("span.agg-widget_btn"))
# ct = randrange(c)
# wd.find_elements_by_css_selector("span.agg-widget_btn")[ct].click()
# self.app.wait_sm_artefact_Block(10)
    # # find out how to click on the element
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
# def get_artef_parametrs(self, ct):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# for row in wd.find_elements_by_xpath("//div[@id='mCSB_%s_container']/ul/li[1]" % ct):
# cells = row.find_elements_by_tag_name("span")
# results = cells[0].find_element_by_tag_name("em").text
# try:
# parametr = cells[3].text
# except:
# parametr = cells[2].text
# return parametr
# def get_artef_param(self, ct):
# wd = self.app.wd
# param = self.get_artef_parametrs(ct)
# return param
# def is_smresult_not_0(self):
# try:
# text = self.get_total_results()
# if text != '0':
# return True
# except:
# return False
# def check_results(self):
# self.app.wait_smBlock(900)
# if self.is_smresult_not_0():
# result = self.get_total_results()
# return result
# else:
# return '0'
# def get_total_results(self):
# wd = self.app.wd
# results = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")
# #clear_result = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")[13:len(results)]
# clear_result = results[13:len(results)]
# return self.clear_result(clear_result)
def create_contact_report_all_in_dif_row_tel_mail(self):
wd = self.app.wd
wd.maximize_window()
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_all_in_dif_row_tel_mail_zakazchiki(self):
wd = self.app.wd
wd.maximize_window()
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='cb-8']").click()
if not wd.find_element_by_id("cb-8").is_selected():
wd.find_element_by_id("cb-8").click()
wd.find_element_by_xpath("//label[@for='cb-9']").click()
if wd.find_element_by_id("cb-9").is_selected():
wd.find_element_by_id("cb-9").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_allinone_tel_mail(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_allinone_tel_mail_zakazchiki(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='cb-8']").click()
if not wd.find_element_by_id("cb-8").is_selected():
wd.find_element_by_id("cb-8").click()
wd.find_element_by_xpath("//label[@for='cb-9']").click()
if wd.find_element_by_id("cb-9").is_selected():
wd.find_element_by_id("cb-9").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_result(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Результаты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//div[@id='divReportSearchResultsSettings']//button[.='Сформировать']").click()
def create_contact_report_statictic(self):
wd = self.app.wd
        #add checkbox selection
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Статистика']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//div[@id='divReportStatisticsSettings']//button[.='Сформировать']").click()
def create_contact_list_10000(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(900)
wd.find_element_by_xpath("//li[@id='UpdateList']//p[.='Добавить']").click()
wd.find_element_by_xpath("//label[@for='sallResults']").click()
if not wd.find_element_by_id("sallResults").is_selected():
wd.find_element_by_id("sallResults").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").clear()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//div[@id='addOrUpdateEntitiesListSearchDlg']//button[.='Сохранить']").click()
def create_purchases_company_list_50(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(900)
        #select 50 items
self.select_all_50()
        #create the first list from the first 50 companies
wd.find_element_by_xpath("//li[@id='UpdateList']//p[.='Добавить']").click()
wd.find_element_by_xpath("//label[@for='scheckedResults']").click()
if not wd.find_element_by_id("scheckedResults").is_selected():
wd.find_element_by_id("scheckedResults").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").clear()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//div[@id='addOrUpdateEntitiesListSearchDlg']//button[.='Сохранить']").click()
def select_all_50(self):
wd = self.app.wd
wd.find_element_by_xpath("//label[@for='allItemsCb']").click()
if not wd.find_element_by_id("allItemsCb").is_selected():
wd.find_element_by_id("allItemsCb").click()
# def clear_result(self, s):
# x = re.sub(" ", "", str(s))
# return x
# def clear_spase_result(self, s):
# x = re.sub(" ", "", str(s))
# return x
def report_is_present_short(self, reestr_ex, report_type_ex, state_ex):
wd = self.app.wd
self.app.wait_smBlock(600)
reestr = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[3]").text.rstrip()
report_type = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[4]").text.rstrip()
state = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[5]").text.rstrip()
if state == "Создан" or state == state_ex:
if report_type == report_type_ex:
if reestr == reestr_ex:
return True
return False
def report_is_present_date(self, cd2):
wd = self.app.wd
date = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[2]").text.rstrip()
exp_date = "Сегодня " + cd2
cd2_hour = cd2[0:2]
cd2_minute = cd2[3:5]
exp_date2 = "Сегодня " + cd2_hour + ":" + str(int(cd2_minute) + 1)
if date == exp_date or date == exp_date2:
return True
return False
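    # (Editor's note, inferred from the slicing above: cd2 looks like an
    # "HH:MM" time string, e.g. "14:05"; exp_date2 tolerates a report stamped
    # one minute later, though the minute is not zero-padded, so e.g. ":06"
    # would be compared against ":6".)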
def monitoring_is_present(self, cd2, cd3, text, reestr_ex):
wd = self.app.wd
wd.refresh()
self.app.wait_smBlock(600)
date = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[2]").text.rstrip()
exp_date = "Сегодня " + cd3
cd2_hour = cd3[0:2]
cd2_minute = cd3[3:5]
exp_name = text[0:-3] + " " + cd2
exp_date2 = "Сегодня " + cd2_hour + ":" + str(int(cd2_minute) + 1)
exp_date3 = "Сегодня " + cd2_hour + ":" + "0" + str(int(cd2_minute) + 1)
reestr = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]").text.rstrip()
name = wd.find_element_by_xpath("//div[@class='panel_layer']//a[.='%s']" % exp_name).text.rstrip()
#name = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[4]").text.rstrip()
if date == exp_date or date == exp_date2 or date == exp_date3:
if reestr == reestr_ex:
if name == exp_name:
return True
return False
def click_on_monitoring_link(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(600)
exp_name = text[0:-3] + " " + cd2
wd.find_element_by_xpath("//div[@class='panel_layer']//a[.='%s']" % exp_name).click()
def contact_or_purchases_list_is_present(self, cd2, text):
wd = self.app.wd
        #check the time
self.app.wait_smBlock(600)
cd_contact_list = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[2]").text.rstrip()
current_name = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]").text.rstrip()
created_name = text[0:-3] + " " + cd2
cd_contact_list_date = cd_contact_list[0:2]
cd2_date = cd2[0:2]
cd_contact_list_month = cd_contact_list[3:5]
cd2_month = cd2[3:5]
cd_contact_list_year = cd_contact_list[6:10]
cd2_year = cd2[6:10]
if len(cd_contact_list) == 18:
cd_contact_list_hour = cd_contact_list[11:12]
cd_contact_list_minute = cd_contact_list[13:15]
else:
cd_contact_list_hour = cd_contact_list[11:13]
cd_contact_list_minute = cd_contact_list[14:16]
cd2_hour = cd2[11:13]
cd2_minute = cd2[14:16]
if cd_contact_list_date == cd2_date:
if cd_contact_list_month == cd2_month:
if cd_contact_list_year == cd2_year:
if cd_contact_list_hour == cd2_hour or cd_contact_list_hour == cd2_hour[1:2]:
if cd_contact_list_minute == cd2_minute or cd_contact_list_minute == str(int(cd2_minute) + 1):
if current_name.startswith(created_name):
return True
else:
return False
def ensure_link_work(self):
wd = self.app.wd
header = wd.find_element_by_css_selector("h1.clip").text
return header.rstrip()
def ensure_link_type2_work(self):
wd = self.app.wd
header = wd.find_element_by_css_selector("h2").text
return header[0:8]
def open_first_contact_list(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]/div/div[1]/a").click()
def create_report_covladeltsy(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Совладельцы']").click()
wd.find_element_by_xpath("//div[@id='divReportCoownersSettings']//button[.='Сформировать']").click()
wd.find_element_by_css_selector("div.toast-title").click()
def create_report_affelir(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Аффилированность']").click()
wd.find_element_by_xpath("//div[@id='divReportAffilationSettings']//button[.='Сформировать']").click()
def create_report_prices_zakazchik(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Цены']").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
#wd.find_element_by_xpath("//label[@for='cb-2']").click()
#if not wd.find_element_by_id("cb-2").is_selected():
# wd.find_element_by_id("cb-2").click()
#wd.find_element_by_xpath("//label[@for='cb-3']").click()
#if not wd.find_element_by_id("cb-3").is_selected():
# wd.find_element_by_id("cb-3").click()
#wd.find_element_by_xpath("//label[@for='cb-4']").click()
#if not wd.find_element_by_id("cb-4").is_selected():
# wd.find_element_by_id("cb-4").click()
wd.find_element_by_xpath("//div[@id='divReportPricesSettings']//button[.='Сформировать']").click()
def create_report_prices_postavschik(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Цены']").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
#wd.find_element_by_xpath("//label[@for='cb-5']").click()
#if not wd.find_element_by_xpath("//label[@for='cb-5']").is_selected():
# wd.find_element_by_xpath("//label[@for='cb-5']").click()
#wd.find_element_by_xpath("//label[@for='cb-6']").click()
#if not wd.find_element_by_xpath("//label[@for='cb-6']").is_selected():
# wd.find_element_by_xpath("//label[@for='cb-6']").click()
wd.find_element_by_xpath("//div[@id='divReportPricesSettings']//button[.='Сформировать']").click()
def create_report_rnpSuppliers(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Поставщик в РНП']").click()
wd.find_element_by_xpath("//div[@id='divRnpSuppliersSettings']//button[.='Сформировать']").click()
def create_report_RnpParticipantsSettings(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Участник в РНП']").click()
wd.find_element_by_xpath("//div[@id='divRnpParticipantsSettings']//button[.='Сформировать']").click()
def create_report_FasComplaintsSettings(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='ФАС']").click()
wd.find_element_by_xpath("//div[@id='divFasComplaintsSettings']//button[.='Сформировать']").click()
def save_requesr(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(600)
try:
wd.find_element_by_link_text("Сохранить запрос").click()
except:
try:
wd.find_element_by_link_text("Сохранить запрос/Мониторинг").click()
except:
try:
wd.find_element_by_link_text("Сохранить запрос ").click()
except:
wd.find_element_by_link_text("Сохранить запрос/Мониторинг ").click()
wd.find_element_by_id("requestName").click()
wd.find_element_by_id("requestName").clear()
wd.find_element_by_id("requestName").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_id("requestName").click()
wd.find_element_by_xpath("//div[@id='divSaveRequest']//button[.='Сохранить']").click()
def refresh_page(self):
wd = self.app.wd
wd.refresh()
self.app.wait_smBlock(600)
def contact_from_contact_rep_is_present(self):
wd = self.app.wd
pass
def get_old_contact_list(self):
pass
def delete_report(self):
pass
def delete_first_contact_list(self):
wd = self.app.wd
self.app.wait_smBlock(600)
        #figure out how to find the checkbox; the code below is nonsense
list = []
#for row in wd.find_element_by_xpath("//input[@class='row-cb']"):
# cells = row.find_elements_by_tag_name("td")
# id = cells[0].find_element_by_tag_name("input").get_attribute("data-id")
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").click()
        if not wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").is_selected():
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").click()
wd.find_element_by_id("btnDel").click()
wd.find_element_by_xpath("//div[@id='dlgYesNo']//button[.='Да']").click()
| 47.122711
| 130
| 0.610245
| 3,615
| 25,729
| 4.050899
| 0.078008
| 0.077028
| 0.163343
| 0.186424
| 0.80183
| 0.783393
| 0.747883
| 0.709847
| 0.666007
| 0.65153
| 0
| 0.022762
| 0.212834
| 25,729
| 545
| 131
| 47.209174
| 0.700291
| 0.375257
| 0
| 0.585106
| 0
| 0.042553
| 0.229844
| 0.186067
| 0.003546
| 0
| 0
| 0
| 0
| 1
| 0.113475
| false
| 0.010638
| 0.010638
| 0
| 0.163121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07ec8ed3603f1cc7b31d1ed635abeed430f55bf6
| 25,009
|
py
|
Python
|
angr_platforms/tricore/rtl.py
|
shahinsba/angr-platforms
|
86f9ea90c396fb5561d0196a2d1a873e573b0294
|
[
"BSD-2-Clause"
] | null | null | null |
angr_platforms/tricore/rtl.py
|
shahinsba/angr-platforms
|
86f9ea90c396fb5561d0196a2d1a873e573b0294
|
[
"BSD-2-Clause"
] | null | null | null |
angr_platforms/tricore/rtl.py
|
shahinsba/angr-platforms
|
86f9ea90c396fb5561d0196a2d1a873e573b0294
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
""" rtl.py
A module for RTL functions.
"""
from pyvex.lifting.util import Type
INT32_MAX_POS = 0x7fffffff #(1 << (32 - 1))-1
UINT32_MAX_POS = 0xffffffff
INT32_MAX_NEG = -0x80000000 #-(1 << (32 - 1))
INT64_MAX_POS = 0x7fffffffffffffff
INT64_MAX_NEG = -0x8000000000000000
SV_MASK = 0x10000000 # bit 29 of PSW
ASV_MASK = 0x4000000 # bit 27 of PSW
def carry(a, b, c):
result_sum = a+b+c
cond_32_lsb_z = (a+b+c) == 0
cond_smaller_a = (result_sum < a)
cond_smaller_b = (result_sum < b)
cond_smaller_c = (result_sum < c)
cond_32_lsb_nz = cond_smaller_a | cond_smaller_b | cond_smaller_c
return cond_32_lsb_z | cond_32_lsb_nz
def overflow(val):
""" Check Overflow for 32-bit values:
- result > 0x7FFFFFFF or result < -0x80000000
"""
return (val >> 32) != 0
def overflow_64(val):
""" Check Overflow for 64-bit values:
- result > 0xFFFFFFFFFFFFFFFF
"""
return (val >> 64) != 0
def advanced_overflow(val):
""" Check advanced overflow for 32-bit values. """
return val[31] ^ val[30]
def advanced_overflow_64(val):
""" Check Advanced Overflow for 64-bit values. """
return val[63] ^ val[62]
def set_usb(psw, C, V, SV, AV, SAV):
""" Set User Status Bits. """
psw = (C << 31) | \
(V << 30) | \
(SV << 29) | \
(AV << 28) | \
(SAV << 27)
return psw
def extend_to_32_bits(val):
val = (val << 31) | (val << 30) | (val << 29) | \
(val << 28) | (val << 27) | (val << 26) | \
(val << 25) | (val << 24) | (val << 23) | \
(val << 22) | (val << 21) | (val << 20) | \
(val << 19) | (val << 18) | (val << 17) | \
(val << 16) | (val << 15) | (val << 14) | \
(val << 13) | (val << 12) | (val << 11) | \
(val << 10) | (val << 9) | (val << 8) | \
(val << 7) | (val << 6) | (val << 5) | \
(val << 4) | (val << 3) | (val << 2) | \
(val << 1) | val
return val
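def _demo_extend_to_32_bits():
    """ Editor's sketch, not part of the original module: with plain Python
    ints standing in for 1-bit VexValue conditions, replicating a 0/1 flag
    across all 32 bits yields an all-ones or all-zeros mask, which the
    saturation helpers below use for branchless selection. """
    assert extend_to_32_bits(1) == 0xffffffff
    assert extend_to_32_bits(0) == 0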
def extend_to_16_bits(val):
val = (val << 15) | (val << 14) | (val << 13) | \
(val << 12) | (val << 11) | (val << 10) | \
(val << 9) | (val << 8) | (val << 7) | \
(val << 6) | (val << 5) | (val << 4) | \
(val << 3) | (val << 2) | (val << 1) | val
return val
def extend_to_8_bits(val):
val = (val << 7) | (val << 6) | (val << 5) | \
(val << 4) | (val << 3) | (val << 2) | \
(val << 1) | val
return val
def extend_to_6_bits(val):
val = (val << 5) | (val << 4) | (val << 3) | \
(val << 2) | (val << 1) | val
return val
def extend_bits(val, bits):
ret = 0
for i in range(bits+1):
ret |= (val << bits-i)
return ret
def ssov(x, y):
""" Saturation on signed overflow. """
max_pos = (1 << (y - 1)) - 1
max_neg = 1 << (y - 1)
cond_x = extend_to_32_bits(x < max_pos)
cond_max_neg = extend_to_32_bits(x > max_neg)
ret = (x & cond_x & ~cond_max_neg) | \
(max_pos & ~cond_x & ~cond_max_neg) | \
(max_neg & ~cond_x & cond_max_neg)
return ret
def ssov16(x):
""" Saturation on signed overflow. """
return x
def ssov32(x, max_pos, max_neg):
""" Saturation on signed overflow.
:param x: Vex Constant (64-bit value).
:param max_pos: Vex Constant (64-bit value).
:param max_neg: Vex Constant (64-bit value).
:return: x or max_pos or max_neg (32-bit value).
"""
cond_max_pos = extend_to_32_bits(x.signed > max_pos)
cond_max_neg = extend_to_32_bits(x.signed < max_neg)
ret = (max_pos & cond_max_pos & ~cond_max_neg) | \
(max_neg & ~cond_max_pos & cond_max_neg) | \
(x & ~cond_max_pos & ~cond_max_neg)
return ret
def ssov64(x):
""" Saturation on signed overflow. """
return x
def suov(x, y):
""" Saturation on unsigned overflow. """
max_pos = (1 << y) - 1
cond_max_pos = extend_to_32_bits(x > max_pos)
ret = (max_pos & cond_max_pos) | (x & ~cond_max_pos)
return ret
def suov16(x):
""" Saturation on unsigned overflow. """
cond_x_neg = extend_to_16_bits((x >> 15) == 1)
ret = x & (cond_x_neg^0xffff)
return ret
def suov32(x):
""" Saturation on unsigned overflow.
:param x: VexValue.
"""
max_pos = (1 << 32) - 1
cond_max_pos = extend_to_32_bits(x > max_pos)
cond_neg = extend_to_32_bits(x < 0)
ret = (max_pos & cond_max_pos & ~cond_neg) | \
(0 & ~cond_max_pos & cond_neg) | \
(x & ~cond_max_pos & ~cond_neg)
return ret
def suov32_sub(x):
""" Saturation on unsigned overflow.
:param x: VexValue.
"""
cond_pos = extend_to_32_bits(x.signed > 0)
ret = x & cond_pos
return ret
def suov32_pos(x):
""" Saturation on unsigned overflow.
:param x: VexValue.
"""
cond_pos = extend_to_32_bits(x > UINT32_MAX_POS)
ret = (UINT32_MAX_POS & cond_pos) | (x & ~cond_pos)
return ret
def suov64(x):
""" Saturation on unsigned overflow. """
cond_x_neg = extend_bits((x[63] == 1), 64)
ret = 0 | (x & ~cond_x_neg)
return ret
def extract_16s(reg, halfword):
""" Return signed halfword value of register.
:param reg: register to extract bits from it.
:param halfword: 0 or 1 for corresponding halfwords.
"""
return (reg >> (halfword * 16)).cast_to(Type.int_16).cast_to(Type.int_32, signed=True)
def sign_extend(val, bits=32):
""" Sign extension. High-order bit of val is left extended.
:param val: VexValue
"""
sign_bit = 1 << (bits - 1)
return (val & (sign_bit - 1)) - (val & sign_bit)
def sign_extend_2(val, width):
""" Sign extension. High-order bit of val is left extended.
:param val: VexValue
:param width: int
"""
cond_sign_bit_1 = extend_to_32_bits((val & ((1 << width)-1)) == 1)
mask_1 = ((0xffffffff >> width) << width) & cond_sign_bit_1
result = val | mask_1
return result
def sign_extend_3(val, width, tmp):
""" Sign extension. High-order bit of val is left extended.
:param val: VexValue
:param width: VexValue
:param tmp: VexValue of 0xffffffff
"""
mask_sign_bit = (1 << (width-1)).cast_to(Type.int_32)
cond_sign_bit_1 = extend_to_32_bits(val & mask_sign_bit == 1)
mask_2 = ((tmp >> width) << width).cast_to(Type.int_32) & cond_sign_bit_1.cast_to(Type.int_32)
result = val | mask_2
return result
def twos_comp(val, bits):
"""compute 2's complement """
if val & (1 << (bits - 1)):
val = val - (1 << bits)
return val
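def _demo_sign_helpers():
    """ Editor's sketch, not part of the original module: on plain Python ints,
    sign_extend and twos_comp map a width-N value whose top bit is set to the
    corresponding negative number, and leave small positive values unchanged. """
    assert sign_extend(0xff, 8) == -1
    assert sign_extend(0x7f, 8) == 127
    assert twos_comp(0xffff, 16) == -1
    assert twos_comp(5, 16) == 5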
def twos_comp_2(val, bits):
"""compute 2's complement
:param val: VexValue
"""
mask = 1 << (bits - 1)
condition = extend_bits((val & mask) == mask, bits)
val = (val - (1 << bits)) & condition
return val
def get_abs_val(val, bits):
""" Compute absolute value
:param val: VexValue
"""
mask = 1 << (bits - 1)
ones = (mask << 1) - 1
condition = extend_to_32_bits(mask & (val & ones) == 0)
result = (val & condition) | (((val ^ ones) + 1) & ~condition)
return result
def clo32(val):
""" Count Leading Ones starting from bit 32. """
# pylint: disable=line-too-long
first_bit = val[31] ^ 0x0
ctr = (1 & val[31]) + \
(1 & val[30]) + \
(1 & val[29] & val[30]) + \
(1 & val[28] & val[30] & val[29]) + \
(1 & val[27] & val[30] & val[29] & val[28]) + \
(1 & val[26] & val[30] & val[29] & val[28] & val[27]) + \
(1 & val[25] & val[30] & val[29] & val[28] & val[27] & val[26]) + \
(1 & val[24] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25]) + \
(1 & val[23] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24]) + \
(1 & val[22] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23]) + \
(1 & val[21] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22]) + \
(1 & val[20] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21]) + \
(1 & val[19] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20]) + \
(1 & val[18] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19]) + \
(1 & val[17] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18]) + \
(1 & val[16] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17]) + \
(1 & val[15] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16]) + \
(1 & val[14] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15]) + \
(1 & val[13] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14]) + \
(1 & val[12] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13]) + \
(1 & val[11] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12]) + \
(1 & val[10] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11]) + \
(1 & val[9] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10]) + \
(1 & val[8] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9]) + \
(1 & val[7] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8]) + \
(1 & val[6] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7]) + \
(1 & val[5] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6]) + \
(1 & val[4] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5]) + \
(1 & val[3] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4]) + \
(1 & val[2] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3]) + \
(1 & val[1] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3] & val[2]) + \
(1 & val[0] & val[30] & val[29] & val[28] & val[27] & val[26] & val[25] & val[24] & val[23] & val[22] & val[21] & val[20] & val[19] & val[18] & val[17] & val[16] & val[15] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3] & val[2] & val[1])
return ctr * first_bit
def clo16(val):
""" Count Leading Ones starting from bit 16. """
# pylint: disable=line-too-long
first_bit = val[15] ^ 0x0
ctr = (1 & val[15]) + \
(1 & val[14]) + \
(1 & val[13] & val[14]) + \
(1 & val[12] & val[14] & val[13]) + \
(1 & val[11] & val[14] & val[13] & val[12]) + \
(1 & val[10] & val[14] & val[13] & val[12] & val[11]) + \
(1 & val[9] & val[14] & val[13] & val[12] & val[11] & val[10]) + \
(1 & val[8] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9]) + \
(1 & val[7] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8]) + \
(1 & val[6] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7]) + \
(1 & val[5] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6]) + \
(1 & val[4] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5]) + \
(1 & val[3] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4]) + \
(1 & val[2] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3]) + \
(1 & val[1] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3] & val[2]) + \
(1 & val[0] & val[14] & val[13] & val[12] & val[11] & val[10] & val[9] & val[8] & val[7] & val[6] & val[5] & val[4] & val[3] & val[2] & val[1])
return ctr * first_bit
def cls(val, disp):
""" Count Leading Signs starting from bit disp.
disp: 15 or 31
"""
mask = 0x1
ctr = 0
sign_bit = disp # bit: 31 or 15
disp -= 1 # first bit is the sign bit
while disp >= 0:
cond = (val[sign_bit] ^ (((val & (mask << disp)) >> disp) & 0x1) == 0)
ctr += (1 & cond)
disp -= 1
return ctr
def clz16(val):
""" Count Leading Zeros starting from bit 16. """
# pylint: disable=line-too-long
first_bit = val[15] ^ 0x1
ctr = (1 & (val[15]^1)) + \
(1 & (val[14]^1)) + \
(1 & (val[13]^1) & (val[14]^1)) + \
(1 & (val[12]^1) & (val[14]^1) & (val[13]^1)) + \
(1 & (val[11]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1)) + \
(1 & (val[10]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1)) + \
(1 & (val[9] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1)) + \
(1 & (val[8] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1)) + \
(1 & (val[7] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1)) + \
(1 & (val[6] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1)) + \
(1 & (val[5] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1)) + \
(1 & (val[4] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1)) + \
(1 & (val[3] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1)) + \
(1 & (val[2] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1)) + \
(1 & (val[1] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1) & (val[2]^1)) + \
(1 & (val[0] ^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1) & (val[2]^1) & (val[1]^1))
return ctr * first_bit
def clz32(val):
""" Count Leading Zeros starting from bit 32. """
# pylint: disable=line-too-long
first_bit = val[31] ^ 0x1
ctr = (1 & (val[31]^1)) + \
(1 & (val[30]^1)) + \
(1 & (val[29]^1) & (val[30]^1)) + \
(1 & (val[28]^1) & (val[30]^1) & (val[29]^1)) + \
(1 & (val[27]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1)) + \
(1 & (val[26]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1)) + \
(1 & (val[25]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1)) + \
(1 & (val[24]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1)) + \
(1 & (val[23]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1)) + \
(1 & (val[22]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1)) + \
(1 & (val[21]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1)) + \
(1 & (val[20]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1)) + \
(1 & (val[19]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1)) + \
(1 & (val[18]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1)) + \
(1 & (val[17]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1)) + \
(1 & (val[16]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1)) + \
(1 & (val[15]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1)) + \
(1 & (val[14]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1)) + \
(1 & (val[13]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1)) + \
(1 & (val[12]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1)) + \
(1 & (val[11]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1)) + \
(1 & (val[10]^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1)) + \
(1 & (val[9] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1)) + \
(1 & (val[8] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1)) + \
(1 & (val[7] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1)) + \
(1 & (val[6] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1)) + \
(1 & (val[5] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1)) + \
(1 & (val[4] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1)) + \
(1 & (val[3] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1)) + \
(1 & (val[2] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1)) + \
(1 & (val[1] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1) & (val[2]^1)) + \
(1 & (val[0] ^1) & (val[30]^1) & (val[29]^1) & (val[28]^1) & (val[27]^1) & (val[26]^1) & (val[25]^1) & (val[24]^1) & (val[23]^1) & (val[22]^1) & (val[21]^1) & (val[20]^1) & (val[19]^1) & (val[18]^1) & (val[17]^1) & (val[16]^1) & (val[15]^1) & (val[14]^1) & (val[13]^1) & (val[12]^1) & (val[11]^1) & (val[10]^1) & (val[9]^1) & (val[8]^1) & (val[7]^1) & (val[6]^1) & (val[5]^1) & (val[4]^1) & (val[3]^1) & (val[2]^1) & (val[1]^1))
return ctr * first_bit
def reverse16(n):
result = n[0] << 15 | n[1] << 14 | \
n[2] << 13 | n[3] << 12 | \
n[4] << 11 | n[5] << 10 | \
n[6] << 9 | n[7] << 8 | \
n[8] << 7 | n[9] << 6 | \
n[10]<< 5 | n[11] << 4 | \
n[12]<< 3 | n[13] << 2 | \
n[14]<< 1 | n[15]
return result
| 58.296037
| 438
| 0.453397
| 4,437
| 25,009
| 2.501465
| 0.040568
| 0.242905
| 0.021624
| 0.020182
| 0.794126
| 0.765474
| 0.727002
| 0.702856
| 0.684566
| 0.664474
| 0
| 0.1834
| 0.255228
| 25,009
| 428
| 439
| 58.432243
| 0.412488
| 0.075573
| 0
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005239
| 0
| 0
| 1
| 0.124542
| false
| 0
| 0.003663
| 0
| 0.252747
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58025967fa2de611203a39d31c54a0226281a0e6
| 96
|
py
|
Python
|
mockfirestore/__init__.py
|
briggleman/python-mock-firestore
|
04720a7695f1826e9a1251dd2fd33324cecbbd43
|
[
"MIT"
] | null | null | null |
mockfirestore/__init__.py
|
briggleman/python-mock-firestore
|
04720a7695f1826e9a1251dd2fd33324cecbbd43
|
[
"MIT"
] | null | null | null |
mockfirestore/__init__.py
|
briggleman/python-mock-firestore
|
04720a7695f1826e9a1251dd2fd33324cecbbd43
|
[
"MIT"
] | 1
|
2019-10-19T15:29:44.000Z
|
2019-10-19T15:29:44.000Z
|
from .main import DocumentSnapshot, DocumentReference, Query, CollectionReference, MockFirestore
| 96
| 96
| 0.875
| 8
| 96
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072917
| 96
| 1
| 96
| 96
| 0.94382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6af593cd0673cb86dbff9481b451ff7b661ee60d
| 5,082
|
py
|
Python
|
modelproject/modelproject/chxmodelproject.py
|
NumEconCopenhagen/projects-2019-chx
|
84cc06539b113d33464a8974fb4d9636acc3d1ba
|
[
"MIT"
] | null | null | null |
modelproject/modelproject/chxmodelproject.py
|
NumEconCopenhagen/projects-2019-chx
|
84cc06539b113d33464a8974fb4d9636acc3d1ba
|
[
"MIT"
] | 3
|
2019-04-16T12:06:15.000Z
|
2019-05-15T23:53:45.000Z
|
modelproject/modelproject/chxmodelproject.py
|
NumEconCopenhagen/projects-2019-chx
|
84cc06539b113d33464a8974fb4d9636acc3d1ba
|
[
"MIT"
] | 2
|
2020-04-02T10:51:19.000Z
|
2022-01-17T16:44:18.000Z
|
import numpy as np
from scipy import optimize
#%matplotlib inline
import matplotlib.pyplot as plt
def keynesian_cross(T, I, G, NX, a, b):
""" Draws the Keynesian cross with the 45-degree line and
the planned total spending as a function of total production.
Args:
        T (float): Taxes
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
Return: Figure
"""
    # The data arrays to be plotted for production and aggregate expenditure:
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - T) + I + G + NX)
degree = Y_arrey
# The figure
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue',linewidth=3)
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='darkorange',linewidth=3)
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return
def cross_equalibrium(T, I, G, NX, a, b):
""" The equalibrium for the Keynesian cross where aggregate expenditure equals total production
Args:
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
Returns:
        Result: Production in equilibrium, Y (float)
"""
return 1/(1-b) * (I + G + NX + a - b*T)
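def _demo_cross_equalibrium():
    """ Editor's worked example with hypothetical numbers, not part of the
    original project: with b = 0.8 the multiplier is 1/(1-0.8) = 5 and
    autonomous spending is I + G + NX + a - b*T = 100 + 100 + 0 + 50 - 80 = 170,
    so equilibrium production is 5 * 170 = 850. """
    Y = cross_equalibrium(T=100, I=100, G=100, NX=0, a=50, b=0.8)
    assert abs(Y - 850) < 1e-6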
def keynesian_cross_NXshift(T, I, G, NX, a, b, delta_NX):
""" Steady state for the Keynesian cross where aggregate expenditure equals total production
Args:
AD (float): Aggregate expenditure
Y (float): Total production
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
        delta_NX (float): Shift in net exports
Returns:
Result: Figure
"""
# The equation setup
NX2 = NX + delta_NX
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - T) + I + G + NX)
AD2_arrey = (a + b * (Y_arrey - T) + I + G + NX2)
degree = Y_arrey
# The figure
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue')
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='orange')
ax.plot(Y_arrey, AD2_arrey, label="AD'=C+I+G+NX'", color='red')
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return
def num_opt(Y_goal,T,I,G,a,b):
""" Numerical optimazation to calculate value of NX to optain production goal
Args:
Y_goal (float): Production goal
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
Returns:
Result: NX (float)
"""
    # Objective function to be minimized:
obj = lambda NX: (cross_equalibrium(T, I, G, NX, a, b) - Y_goal)**2
# Initial guess
x0 = 10
return optimize.minimize(obj,x0)
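def _demo_num_opt():
    """ Editor's usage sketch with hypothetical numbers, not part of the
    original project: to hit a production goal of 900, autonomous spending must
    equal 900*(1-b) = 180, so the optimizer should land near
    NX = 180 - (I + G + a - b*T) = 10. """
    res = num_opt(Y_goal=900, T=100, I=100, G=100, a=50, b=0.8)
    assert abs(res.x[0] - 10) < 1e-3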
def keynesian_cross_NXshift_t(k, t, I, G, NX, a, b, delta_NX):
""" Steady state for the Keynesian cross where aggregate expenditure equals total production
Args:
AD (float): Aggregate expenditure
Y (float): Total production
k (float): Base tax
t (float): Marginal tax rate
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
        delta_NX (float): Shift in net exports
Returns:
Result: Figure
"""
    # The equation setup and generation of the data arrays:
NX2 = NX + delta_NX
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - (k + b*t)) + I + G + NX)
AD2_arrey = (a + b * (Y_arrey - (k + b*t)) + I + G + NX2)
degree = Y_arrey
# The figure:
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue')
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='orange')
ax.plot(Y_arrey, AD2_arrey, label="AD'=C+I+G+NX'", color='red')
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return
| 32.164557
| 99
| 0.613341
| 758
| 5,082
| 4.022427
| 0.171504
| 0.037389
| 0.018367
| 0.05018
| 0.795015
| 0.768777
| 0.766481
| 0.766481
| 0.751394
| 0.751394
| 0
| 0.018421
| 0.252263
| 5,082
| 158
| 100
| 32.164557
| 0.783947
| 0.423259
| 0
| 0.727273
| 0
| 0
| 0.09375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.045455
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed0a681f73db2b21175cc869d5444af5418367f8
| 104
|
py
|
Python
|
core/messenger/exceptions.py
|
anthill-arch/platform
|
ff45dc71b2f3141bbd95baaf4da7ff1d2ac24ca0
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
core/messenger/exceptions.py
|
anthill-arch/platform
|
ff45dc71b2f3141bbd95baaf4da7ff1d2ac24ca0
|
[
"MIT"
] | null | null | null |
core/messenger/exceptions.py
|
anthill-arch/platform
|
ff45dc71b2f3141bbd95baaf4da7ff1d2ac24ca0
|
[
"MIT"
] | null | null | null |
class NotAuthenticatedError(Exception):
pass
class AuthenticationFailedError(Exception):
pass
| 14.857143
| 43
| 0.788462
| 8
| 104
| 10.25
| 0.625
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 104
| 6
| 44
| 17.333333
| 0.931818
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ed2c8027b3616dd0462d0630ba3265e706057df8
| 89
|
py
|
Python
|
final_project/machinetranslation/__init__.py
|
yl-miao/xzceb-flask_eng_fr
|
916316b27fa447396a99314f41c643109ce22a7e
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/__init__.py
|
yl-miao/xzceb-flask_eng_fr
|
916316b27fa447396a99314f41c643109ce22a7e
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/__init__.py
|
yl-miao/xzceb-flask_eng_fr
|
916316b27fa447396a99314f41c643109ce22a7e
|
[
"Apache-2.0"
] | null | null | null |
from . import translator
#import sys
#sys.path.append("./tests")
from . import unit_tests
| 22.25
| 27
| 0.752809
| 13
| 89
| 5.076923
| 0.615385
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 89
| 4
| 28
| 22.25
| 0.835443
| 0.404494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed2d8840bdda81ffe9f810a0e4a80dda3ea7871d
| 49
|
py
|
Python
|
protobuf/__main__.py
|
Axonny/Protobuf
|
709e7d77f94e7482021c17fc18c441a1f2af5a1e
|
[
"MIT"
] | null | null | null |
protobuf/__main__.py
|
Axonny/Protobuf
|
709e7d77f94e7482021c17fc18c441a1f2af5a1e
|
[
"MIT"
] | null | null | null |
protobuf/__main__.py
|
Axonny/Protobuf
|
709e7d77f94e7482021c17fc18c441a1f2af5a1e
|
[
"MIT"
] | null | null | null |
from protobuf.generate_class import main
main()
| 12.25
| 40
| 0.816327
| 7
| 49
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 3
| 41
| 16.333333
| 0.906977
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
71f7088fe598802b25d2f6b8197de7cf0d4aa2e0
| 103,130
|
py
|
Python
|
quarkchain/cluster/tests/test_shard_state.py
|
HAOYUatHZ/pyquarkchain
|
b2c7c02e4415aa26917c2cbb5e7571c9fef16c5b
|
[
"MIT"
] | 1
|
2018-10-23T05:48:42.000Z
|
2018-10-23T05:48:42.000Z
|
quarkchain/cluster/tests/test_shard_state.py
|
skji/pyquarkchain
|
090f9981b89b8873daaed36171a9bc9f27b10473
|
[
"MIT"
] | null | null | null |
quarkchain/cluster/tests/test_shard_state.py
|
skji/pyquarkchain
|
090f9981b89b8873daaed36171a9bc9f27b10473
|
[
"MIT"
] | null | null | null |
import random
import unittest
from fractions import Fraction
from quarkchain.cluster.shard_state import ShardState
from quarkchain.cluster.tests.test_utils import (
get_test_env,
create_transfer_transaction,
create_contract_creation_transaction,
contract_creation_tx,
)
from quarkchain.config import ConsensusType
from quarkchain.core import CrossShardTransactionDeposit, CrossShardTransactionList
from quarkchain.core import Identity, Address, TokenBalanceMap
from quarkchain.diff import EthDifficultyCalculator
from quarkchain.evm import opcodes
from quarkchain.genesis import GenesisManager
def create_default_shard_state(
env, shard_id=0, diff_calc=None, posw_override=False, no_coinbase=False
):
genesis_manager = GenesisManager(env.quark_chain_config)
shard_size = next(iter(env.quark_chain_config.shards.values())).SHARD_SIZE
full_shard_id = shard_size | shard_id
if posw_override:
posw_config = env.quark_chain_config.shards[full_shard_id].POSW_CONFIG
posw_config.ENABLED = True
if no_coinbase:
env.quark_chain_config.shards[full_shard_id].COINBASE_AMOUNT = 0
shard_state = ShardState(env=env, full_shard_id=full_shard_id, diff_calc=diff_calc)
shard_state.init_genesis_state(genesis_manager.create_root_block())
return shard_state
class TestShardState(unittest.TestCase):
def setUp(self):
super().setUp()
config = get_test_env().quark_chain_config
self.root_coinbase = config.ROOT.COINBASE_AMOUNT
self.shard_coinbase = next(iter(config.shards.values())).COINBASE_AMOUNT
        # to make test verification easier, assume the following tax rate
assert config.REWARD_TAX_RATE == 0.5
self.tax_rate = config.reward_tax_rate # type: Fraction
self.genesis_token = config.genesis_token # type: int
self.genesis_token_str = config.GENESIS_TOKEN # type: str
def get_after_tax_reward(self, value: int) -> int:
return value * self.tax_rate.numerator // self.tax_rate.denominator
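    # (Editor's note: with REWARD_TAX_RATE == 0.5 this is exact integer
    #  arithmetic with floor division, e.g. get_after_tax_reward(7) == 3.)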
def test_shard_state_simple(self):
env = get_test_env()
state = create_default_shard_state(env)
self.assertEqual(state.root_tip.height, 0)
self.assertEqual(state.header_tip.height, 0)
# make sure genesis minor block has the right coinbase after-tax
self.assertEqual(
state.header_tip.coinbase_amount_map.balance_map,
{self.genesis_token: 2500000000000000000},
)
def test_init_genesis_state(self):
env = get_test_env()
state = create_default_shard_state(env)
genesis_header = state.header_tip
root_block = state.root_tip.create_block_to_append(nonce=1234)
root_block.header.height = 0
root_block.finalize()
new_genesis_block, _ = state.init_genesis_state(root_block)
self.assertNotEqual(
new_genesis_block.header.get_hash(), genesis_header.get_hash()
)
# header tip is still the old genesis header
self.assertEqual(state.header_tip, genesis_header)
block = new_genesis_block.create_block_to_append()
state.finalize_and_add_block(block)
# extending new_genesis_block doesn't change header_tip due to root chain first consensus
self.assertEqual(state.header_tip, genesis_header)
self.assertEqual(genesis_header, state.db.get_minor_block_by_height(0).header)
# extending the root block will change the header_tip
root_block = root_block.create_block_to_append(nonce=1234).finalize()
self.assertTrue(state.add_root_block(root_block))
# ideally header_tip should be block.header, but we don't track tips on fork chains for the moment,
# so it reverts all the way back to genesis
self.assertEqual(state.header_tip, new_genesis_block.header)
self.assertEqual(new_genesis_block, state.db.get_minor_block_by_height(0))
def test_blocks_with_incorrect_version(self):
env = get_test_env()
state = create_default_shard_state(env=env)
root_block = state.root_tip.create_block_to_append()
root_block.header.version = 1
with self.assertRaisesRegexp(ValueError, "incorrect root block version"):
state.add_root_block(root_block.finalize())
root_block.header.version = 0
state.add_root_block(root_block.finalize())
shard_block = state.create_block_to_mine()
shard_block.header.version = 1
with self.assertRaisesRegexp(ValueError, "incorrect minor block version"):
state.finalize_and_add_block(shard_block)
shard_block.header.version = 0
state.finalize_and_add_block(shard_block)
def test_gas_price(self):
id_list = [Identity.create_random_identity() for _ in range(5)]
acc_list = [Address.create_from_identity(i, full_shard_key=0) for i in id_list]
env = get_test_env(genesis_account=acc_list[0], genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
# 5 tx per block, make 3 blocks
for _ in range(3):
for j in range(5):
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id_list[j].get_key(),
from_address=acc_list[j],
to_address=random.choice(acc_list),
value=0,
gas_price=42 if j == 0 else 0,
)
)
b = state.create_block_to_mine(address=acc_list[1])
state.finalize_and_add_block(b)
# for testing purposes, update percentile to take max gas price
state.gas_price_suggestion_oracle.percentile = 100
gas_price = state.gas_price()
self.assertEqual(gas_price, 42)
# results should be cached (same header), so updating the oracle shouldn't take effect
state.gas_price_suggestion_oracle.percentile = 50
gas_price = state.gas_price()
self.assertEqual(gas_price, 42)
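# Sketch of the oracle behavior exercised above (assumption: the suggestion
# is a percentile over the gas prices of recent txs, cached by header hash):
#
#     prices = sorted([42, 0, 0, 0, 0] * 3)                # 15 txs over 3 blocks
#     assert prices[(len(prices) - 1) * 100 // 100] == 42  # 100th percentile -> max
#     assert prices[(len(prices) - 1) * 50 // 100] == 0    # 50th percentile -> median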
def test_estimate_gas(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx_gen = lambda data: create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
data=data,
)
tx = tx_gen(b"")
estimate = state.estimate_gas(tx, acc1)
self.assertEqual(estimate, 21000)
tx = tx_gen(b"12123478123412348125936583475758")
estimate = state.estimate_gas(tx, acc1)
self.assertEqual(estimate, 23176)
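# The estimates follow intrinsic-gas arithmetic: 21000 base for a plain
# transfer plus a per-byte calldata charge. Assuming the pre-Istanbul rate of
# 68 gas per non-zero byte, the 32-byte payload above gives
# 21000 + 32 * 68 == 23176, matching the assertion.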
def test_execute_tx(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
)
# fill gas_used up to the limit to make sure `execute_tx` resets it
state.evm_state.gas_used = state.evm_state.gas_limit
res = state.execute_tx(tx, acc1)
self.assertEqual(res, b"")
def test_add_tx_incorrect_from_shard_id(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=1)
acc2 = Address.create_random_account(full_shard_key=1)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# state is shard 0 but tx from shard 1
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
)
self.assertFalse(state.add_tx(tx))
self.assertIsNone(state.execute_tx(tx, acc1))
def test_one_tx(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
gas=50000,
)
state.evm_state.gas_used = state.evm_state.gas_limit
self.assertTrue(state.add_tx(tx))
block, i = state.get_transaction_by_hash(tx.get_hash())
self.assertEqual(block.tx_list[0], tx)
self.assertEqual(block.header.create_time, 0)
self.assertEqual(i, 0)
# tx claims to use more gas than the block limit and thus is not included
b1 = state.create_block_to_mine(address=acc3, gas_limit=49999)
self.assertEqual(len(b1.tx_list), 0)
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 1)
# Should succeed
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b1.header)
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token),
10000000 - opcodes.GTXCOST - 12345,
)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token), 12345
)
# shard miner only receives a percentage of reward because of REWARD_TAX_RATE
self.assertEqual(
state.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXCOST + self.shard_coinbase),
)
# Check receipts
self.assertEqual(len(state.evm_state.receipts), 1)
self.assertEqual(state.evm_state.receipts[0].state_root, b"\x01")
self.assertEqual(state.evm_state.receipts[0].gas_used, 21000)
block, i = state.get_transaction_by_hash(tx.get_hash())
self.assertEqual(block, b1)
self.assertEqual(i, 0)
# Check receipts in storage
resp = state.get_transaction_receipt(tx.get_hash())
self.assertIsNotNone(resp)
block, i, r = resp
self.assertEqual(block, b1)
self.assertEqual(i, 0)
self.assertEqual(r.success, b"\x01")
self.assertEqual(r.gas_used, 21000)
# Check Account has full_shard_key
self.assertEqual(
state.evm_state.get_full_shard_key(acc2.recipient), acc2.full_shard_key
)
tx_list, _ = state.db.get_transactions_by_address(acc1)
self.assertEqual(tx_list[0].value, 12345)
tx_list, _ = state.db.get_transactions_by_address(acc2)
self.assertEqual(tx_list[0].value, 12345)
def test_duplicated_tx(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
)
self.assertTrue(state.add_tx(tx))
self.assertFalse(state.add_tx(tx)) # already in tx_queue
self.assertEqual(len(state.tx_queue), 1)
self.assertEqual(len(state.tx_dict), 1)
block, i = state.get_transaction_by_hash(tx.get_hash())
self.assertEqual(len(block.tx_list), 1)
self.assertEqual(block.tx_list[0], tx)
self.assertEqual(block.header.create_time, 0)
self.assertEqual(i, 0)
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 1)
# Should succeed
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b1.header)
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token),
10000000 - opcodes.GTXCOST - 12345,
)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token), 12345
)
self.assertEqual(
state.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXCOST + self.shard_coinbase),
)
# Check receipts
self.assertEqual(len(state.evm_state.receipts), 1)
self.assertEqual(state.evm_state.receipts[0].state_root, b"\x01")
self.assertEqual(state.evm_state.receipts[0].gas_used, 21000)
block, i = state.get_transaction_by_hash(tx.get_hash())
self.assertEqual(block, b1)
self.assertEqual(i, 0)
# tx already confirmed
self.assertTrue(state.db.contain_transaction_hash(tx.get_hash()))
self.assertFalse(state.add_tx(tx))
def test_add_invalid_tx_fail(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=999999999999999999999, # insane
)
self.assertFalse(state.add_tx(tx))
self.assertEqual(len(state.tx_queue), 0)
def test_add_non_neighbor_tx_fail(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=3) # not acc1's neighbor
acc3 = Address.create_random_account(full_shard_key=8) # acc1's neighbor
env = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=0,
gas=1000000,
)
self.assertFalse(state.add_tx(tx))
self.assertEqual(len(state.tx_queue), 0)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc3,
value=0,
gas=1000000,
)
self.assertTrue(state.add_tx(tx))
self.assertEqual(len(state.tx_queue), 1)
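# Neighbor-rule sketch (assumption: in a large QuarkChain network two shards
# are neighbors when their IDs differ in exactly one bit, i.e. XOR is a
# power of two):
#
#     def is_power_of_two(n):
#         return n > 0 and n & (n - 1) == 0
#
#     assert not is_power_of_two(0 ^ 3)  # shards 0 and 3: not neighbors, tx rejected
#     assert is_power_of_two(0 ^ 8)      # shards 0 and 8: neighbors, tx accepted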
def test_exceeding_xshard_limit(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=1)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
# a huge number to make xshard tx limit become 0 so that no xshard tx can be
# included in the block
env.quark_chain_config.MAX_NEIGHBORS = 10 ** 18
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
# add an xshard tx with large startgas
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
gas=state.get_xshard_gas_limit() + 1,
)
self.assertFalse(state.add_tx(tx))
# xshard tx
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
gas=50000,
)
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 1)
# inshard tx
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc3,
value=12345,
gas=50000,
)
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 1)
def test_two_tx_in_one_block(self):
id1 = Identity.create_random_identity()
id2 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id2, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=2000000 + opcodes.GTXCOST
)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=1000000,
)
)
b0 = state.create_block_to_mine(address=acc3)
state.finalize_and_add_block(b0)
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token), 1000000
)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token), 1000000
)
self.assertEqual(
state.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXCOST + self.shard_coinbase),
)
# Check Account has full_shard_key
self.assertEqual(
state.evm_state.get_full_shard_key(acc2.recipient), acc2.full_shard_key
)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=Address(
acc2.recipient, acc2.full_shard_key + 2
), # set a different full shard id
value=12345,
gas=50000,
)
)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id2.get_key(),
from_address=acc2,
to_address=acc1,
value=54321,
gas=40000,
)
)
# Inshard gas limit is 40000 - 20000
b1 = state.create_block_to_mine(
address=acc3, gas_limit=40000, xshard_gas_limit=20000
)
self.assertEqual(len(b1.tx_list), 0)
b1 = state.create_block_to_mine(
address=acc3, gas_limit=40000, xshard_gas_limit=0
)
self.assertEqual(len(b1.tx_list), 1)
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 2)
# Should succeed
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b1.header)
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token),
1000000 - opcodes.GTXCOST - 12345 + 54321,
)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token),
1000000 - opcodes.GTXCOST + 12345 - 54321,
)
# 3 tx fees and 2 block rewards collected across b0 and b1
self.assertEqual(
state.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXCOST * 3 + self.shard_coinbase * 2),
)
# Check receipts
self.assertEqual(len(state.evm_state.receipts), 2)
self.assertEqual(state.evm_state.receipts[0].state_root, b"\x01")
self.assertEqual(state.evm_state.receipts[0].gas_used, 21000)
self.assertEqual(state.evm_state.receipts[1].state_root, b"\x01")
self.assertEqual(state.evm_state.receipts[1].gas_used, 42000)
block, i = state.get_transaction_by_hash(b1.tx_list[0].get_hash())
self.assertEqual(block, b1)
self.assertEqual(i, 0)
block, i = state.get_transaction_by_hash(b1.tx_list[1].get_hash())
self.assertEqual(block, b1)
self.assertEqual(i, 1)
# Check acc2 full_shard_key doesn't change
self.assertEqual(
state.evm_state.get_full_shard_key(acc2.recipient), acc2.full_shard_key
)
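# Note on the receipt assertions above: receipt gas_used is cumulative within
# the block, so the second receipt reads 42000 == 21000 (tx0) + 21000 (tx1)
# rather than 21000 for tx1 alone.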
def test_fork_does_not_confirm_tx(self):
"""Tx should only be confirmed and removed from tx queue by the best chain"""
id1 = Identity.create_random_identity()
id2 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id2, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=2000000 + opcodes.GTXCOST
)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=1000000,
)
)
b0 = state.create_block_to_mine(address=acc3)
b1 = state.create_block_to_mine(address=acc3)
b0.tx_list = [] # make b0 empty
state.finalize_and_add_block(b0)
# tx is added back to the queue at the end of create_block_to_mine
self.assertEqual(len(state.tx_queue), 1)
self.assertEqual(len(b1.tx_list), 1)
state.finalize_and_add_block(b1)
# b1 is a fork and does not remove the tx from queue
self.assertEqual(len(state.tx_queue), 1)
b2 = state.create_block_to_mine(address=acc3)
state.finalize_and_add_block(b2)
self.assertEqual(len(state.tx_queue), 0)
def test_revert_fork_put_tx_back_to_queue(self):
"""Tx in the reverted chain should be put back to the queue"""
id1 = Identity.create_random_identity()
id2 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id2, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=2000000 + opcodes.GTXCOST
)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=1000000,
)
)
b0 = state.create_block_to_mine(address=acc3)
b1 = state.create_block_to_mine(address=acc3)
state.finalize_and_add_block(b0)
self.assertEqual(len(state.tx_queue), 0)
b1.tx_list = [] # make b1 empty
state.finalize_and_add_block(b1)
self.assertEqual(len(state.tx_queue), 0)
b2 = b1.create_block_to_append()
state.finalize_and_add_block(b2)
# now b1-b2 becomes the best chain; we expect b0 to be reverted and its tx put back into the queue
self.assertEqual(len(state.tx_queue), 1)
b3 = b0.create_block_to_append()
state.finalize_and_add_block(b3)
self.assertEqual(len(state.tx_queue), 1)
b4 = b3.create_block_to_append()
state.finalize_and_add_block(b4)
# b0-b3-b4 becomes the best chain
self.assertEqual(len(state.tx_queue), 0)
def test_stale_block_count(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
b1 = state.create_block_to_mine(address=acc3)
b2 = state.create_block_to_mine(address=acc3)
b2.header.create_time += 1
state.finalize_and_add_block(b1)
self.assertEqual(state.db.get_block_count_by_height(1), 1)
state.finalize_and_add_block(b2)
self.assertEqual(state.db.get_block_count_by_height(1), 2)
def test_xshard_tx_sent(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=1)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
env1 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state1 = create_default_shard_state(env=env1, shard_id=1)
# Add a root block to update block gas limit so that xshard tx can be included
root_block = (
state.root_tip.create_block_to_append()
.add_minor_block_header(state.header_tip)
.add_minor_block_header(state1.header_tip)
.finalize()
)
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
)
state.add_tx(tx)
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 1)
self.assertEqual(state.evm_state.gas_used, 0)
# Should succeed
state.finalize_and_add_block(b1)
self.assertEqual(len(state.evm_state.xshard_list), 1)
self.assertEqual(
state.evm_state.xshard_list[0],
CrossShardTransactionDeposit(
tx_hash=tx.get_hash(),
from_address=acc1,
to_address=acc2,
value=888888,
gas_price=1,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
),
)
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token),
10000000 - 888888 - (opcodes.GTXCOST + opcodes.GTXXSHARDCOST),
)
# Make sure the xshard gas is not used by local block
self.assertEqual(
state.evm_state.gas_used, opcodes.GTXCOST + opcodes.GTXXSHARDCOST
)
# GTXXSHARDCOST is consumed by remote shard
self.assertEqual(
state.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXCOST + self.shard_coinbase),
)
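# Balance arithmetic behind the assertions above, assuming the opcode
# constants GTXCOST == 21000 and GTXXSHARDCOST == 9000: the sender pays
# 888888 + 1 * (21000 + 9000), leaving 10000000 - 888888 - 30000 == 9081112.
# The local miner is only credited the GTXCOST leg; the GTXXSHARDCOST leg is
# charged here but paid out on the receiving shard.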
def test_xshard_tx_insufficient_gas(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=1)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
state.add_tx(
create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=888888,
gas=opcodes.GTXCOST,
)
)
b1 = state.create_block_to_mine(address=acc3)
self.assertEqual(len(b1.tx_list), 0)
self.assertEqual(len(state.tx_queue), 0)
def test_xshard_tx_received(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=16)
acc3 = Address.create_random_account(full_shard_key=0)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env1 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=16)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.add_minor_block_header(state1.header_tip)
.finalize()
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
# Add one block in shard 0
b0 = state0.create_block_to_mine()
state0.finalize_and_add_block(b0)
b1 = state1.get_tip().create_block_to_append()
b1.header.hash_prev_root_block = root_block.header.get_hash()
tx = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx)
# Add a x-shard tx from remote peer
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b1.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=tx.get_hash(),
from_address=acc2,
to_address=acc1,
value=888888,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
)
]
),
)
# Create a root block containing the block with the x-shard tx
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b0.header)
.add_minor_block_header(b1.header)
.finalize()
)
state0.add_root_block(root_block)
# Add b2 and make sure all x-shard txs are applied
b2 = state0.create_block_to_mine(address=acc3)
state0.finalize_and_add_block(b2)
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 888888,
)
# Half collected by root
self.assertEqual(
state0.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXXSHARDCOST * 2 + self.shard_coinbase),
)
# X-shard gas used
evmState0 = state0.evm_state
self.assertEqual(evmState0.xshard_receive_gas_used, opcodes.GTXXSHARDCOST)
def test_xshard_tx_received_exclude_non_neighbor(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=3)
acc3 = Address.create_random_account(full_shard_key=0)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env1 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=3)
b0 = state0.get_tip()
b1 = state1.get_tip().create_block_to_append()
tx = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx)
# Create a root block containing the block with the x-shard tx
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b0.header)
.add_minor_block_header(b1.header)
.finalize()
)
state0.add_root_block(root_block)
b2 = state0.create_block_to_mine(address=acc3)
state0.finalize_and_add_block(b2)
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token), 10000000
)
# Half collected by root
self.assertEqual(
state0.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(self.shard_coinbase),
)
# No xshard tx is processed on the receiving side due to non-neighbor
evm_state0 = state0.evm_state
self.assertEqual(evm_state0.xshard_receive_gas_used, 0)
def test_xshard_from_root_block(self):
id1 = Identity.create_random_identity()
id2 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id2, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
# Add a root block to update block gas limit so that xshard tx can be included
root_block = (
state.root_tip.create_block_to_append()
.add_minor_block_header(state.header_tip)
.finalize(
coinbase_tokens={env.quark_chain_config.genesis_token: 1000000},
coinbase_address=acc2,
)
)
state.add_root_block(root_block)
b0 = state.create_block_to_mine()
state.finalize_and_add_block(b0)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token), 1000000
)
def test_xshard_for_two_root_blocks(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=1)
acc3 = Address.create_random_account(full_shard_key=0)
env0 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
env1 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=1)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.add_minor_block_header(state1.header_tip)
.finalize()
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
# Add one block in shard 0
b0 = state0.create_block_to_mine()
state0.finalize_and_add_block(b0)
b1 = state1.get_tip().create_block_to_append()
b1.header.hash_prev_root_block = root_block.header.get_hash()
tx = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
)
b1.add_tx(tx)
# Add a x-shard tx from state1
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b1.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=tx.get_hash(),
from_address=acc2,
to_address=acc1,
value=888888,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
)
]
),
)
# Create a root block containing the block with the x-shard tx
root_block0 = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b0.header)
.add_minor_block_header(b1.header)
.finalize()
)
state0.add_root_block(root_block0)
b2 = state0.get_tip().create_block_to_append()
state0.finalize_and_add_block(b2)
b3 = b1.create_block_to_append()
b3.header.hash_prev_root_block = root_block.header.get_hash()
# Add a x-shard tx from state1
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b3.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=bytes(32),
from_address=acc2,
to_address=acc1,
value=385723,
gas_price=3,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
)
]
),
)
root_block1 = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b2.header)
.add_minor_block_header(b3.header)
.finalize()
)
state0.add_root_block(root_block1)
# Test x-shard gas limit when create_block_to_mine
b6 = state0.create_block_to_mine(address=acc3, gas_limit=opcodes.GTXXSHARDCOST)
self.assertEqual(b6.header.hash_prev_root_block, root_block1.header.get_hash())
# There are two x-shard txs: one is the root block coinbase with zero gas, and the other is from shard 1
b7 = state0.create_block_to_mine(
address=acc3, gas_limit=2 * opcodes.GTXXSHARDCOST
)
self.assertEqual(b7.header.hash_prev_root_block, root_block1.header.get_hash())
b8 = state0.create_block_to_mine(
address=acc3, gas_limit=3 * opcodes.GTXXSHARDCOST
)
self.assertEqual(b8.header.hash_prev_root_block, root_block1.header.get_hash())
# Add b4 and make sure all x-shard txs are applied
b4 = state0.create_block_to_mine(address=acc3)
self.assertEqual(b4.header.hash_prev_root_block, root_block1.header.get_hash())
state0.finalize_and_add_block(b4)
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 888888 + 385723,
)
# Half collected by root
self.assertEqual(
state0.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(
opcodes.GTXXSHARDCOST * (2 + 3) + self.shard_coinbase
),
)
# Check gas used for receiving x-shard tx
self.assertEqual(state0.evm_state.gas_used, 18000)
self.assertEqual(state0.evm_state.xshard_receive_gas_used, 18000)
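# The 18000 above is consistent with two x-shard deposits consumed at
# GTXXSHARDCOST == 9000 gas each (2 * 9000 == 18000); the root-block
# coinbase deposit costs zero gas, so it does not show up in gas_used.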
def test_xshard_gas_limit(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=16)
acc3 = Address.create_random_account(full_shard_key=0)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env1 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=16)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.add_minor_block_header(state1.header_tip)
.finalize()
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
# Add one block in shard 1 with 2 x-shard txs
b1 = state1.get_tip().create_block_to_append()
b1.header.hash_prev_root_block = root_block.header.get_hash()
tx0 = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx0)
tx1 = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=111111,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx1)
# Add a x-shard tx from remote peer
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b1.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=tx0.get_hash(),
from_address=acc2,
to_address=acc1,
value=888888,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
),
CrossShardTransactionDeposit(
tx_hash=tx1.get_hash(),
from_address=acc2,
to_address=acc1,
value=111111,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
),
]
),
)
# Create a root block containing the block with the x-shard tx
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b1.header)
.finalize(
coinbase_tokens={env0.quark_chain_config.genesis_token: 1000000},
coinbase_address=acc1,
)
)
state0.add_root_block(root_block)
# Add b2 and make sure one x-shard tx is applied
b2 = state0.create_block_to_mine(
address=acc3, xshard_gas_limit=opcodes.GTXXSHARDCOST
)
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 888888,
)
# Half collected by root
self.assertEqual(
state0.get_token_balance(acc3.recipient, self.genesis_token),
self.get_after_tax_reward(opcodes.GTXXSHARDCOST * 2 + self.shard_coinbase),
)
# X-shard gas used
evmState0 = state0.evm_state
self.assertEqual(evmState0.xshard_receive_gas_used, opcodes.GTXXSHARDCOST)
# Add another block and make sure the remaining x-shard tx is applied
b2 = state0.create_block_to_mine(
address=acc3, xshard_gas_limit=opcodes.GTXXSHARDCOST
)
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 888888 + 111111,
)
# Add b3 and make sure no x-shard txs are applied
b3 = state0.create_block_to_mine(
address=acc3, xshard_gas_limit=opcodes.GTXXSHARDCOST
)
state0.finalize_and_add_block(b3, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 888888 + 111111,
)
b4 = state0.create_block_to_mine(
address=acc3, xshard_gas_limit=opcodes.GTXXSHARDCOST
)
state0.finalize_and_add_block(b4, xshard_gas_limit=opcodes.GTXXSHARDCOST)
self.assertNotEqual(
b2.meta.xshard_tx_cursor_info, b3.meta.xshard_tx_cursor_info
)
self.assertEqual(b3.meta.xshard_tx_cursor_info, b4.meta.xshard_tx_cursor_info)
b5 = state0.create_block_to_mine(
address=acc3,
gas_limit=opcodes.GTXXSHARDCOST,
xshard_gas_limit=2 * opcodes.GTXXSHARDCOST,
)
with self.assertRaises(ValueError):
# xshard_gas_limit should be smaller than gas_limit
state0.finalize_and_add_block(
b5,
gas_limit=opcodes.GTXXSHARDCOST,
xshard_gas_limit=2 * opcodes.GTXXSHARDCOST,
)
b6 = state0.create_block_to_mine(
address=acc3, xshard_gas_limit=opcodes.GTXXSHARDCOST
)
with self.assertRaises(ValueError):
# xshard_gas_limit should be gas_limit // 2
state0.finalize_and_add_block(b6)
def test_xshard_gas_limit_from_multiple_shards(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=16)
acc3 = Address.create_from_identity(id1, full_shard_key=8)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env1 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env2 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=16)
state2 = create_default_shard_state(env=env2, shard_id=8)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.add_minor_block_header(state1.header_tip)
.add_minor_block_header(state2.header_tip)
.finalize()
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
state2.add_root_block(root_block)
# Add one block in shard 1 with 2 x-shard txs
b1 = state1.get_tip().create_block_to_append()
b1.header.hash_prev_root_block = root_block.header.get_hash()
tx0 = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=888888,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx0)
tx1 = create_transfer_transaction(
shard_state=state1,
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=111111,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b1.add_tx(tx1)
# Add a x-shard tx from remote peer
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b1.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=tx0.get_hash(),
from_address=acc2,
to_address=acc1,
value=888888,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
),
CrossShardTransactionDeposit(
tx_hash=tx1.get_hash(),
from_address=acc2,
to_address=acc1,
value=111111,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
),
]
),
)
# Add one block in shard 8 with 1 x-shard tx
b2 = state2.get_tip().create_block_to_append()
b2.header.hash_prev_root_block = root_block.header.get_hash()
tx3 = create_transfer_transaction(
shard_state=state2,
key=id1.get_key(),
from_address=acc3,
to_address=acc1,
value=12345,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
gas_price=2,
)
b2.add_tx(tx3)
# Add a x-shard tx from remote peer
state0.add_cross_shard_tx_list_by_minor_block_hash(
h=b2.header.get_hash(),
tx_list=CrossShardTransactionList(
tx_list=[
CrossShardTransactionDeposit(
tx_hash=tx3.get_hash(),
from_address=acc3,
to_address=acc1,
value=12345,
gas_price=2,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
)
]
),
)
# Create a root block containing the block with the x-shard tx
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(b2.header)
.add_minor_block_header(b1.header)
.finalize(
coinbase_tokens={env0.quark_chain_config.genesis_token: 1000000},
coinbase_address=acc1,
)
)
state0.add_root_block(root_block)
# Add b2 and make sure one x-shard tx is applied
b2 = state0.create_block_to_mine(xshard_gas_limit=opcodes.GTXXSHARDCOST)
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 12345,
)
# X-shard gas used
evmState0 = state0.evm_state
self.assertEqual(evmState0.xshard_receive_gas_used, opcodes.GTXXSHARDCOST)
# Add another block and make sure the next x-shard tx is applied
b2 = state0.create_block_to_mine(xshard_gas_limit=opcodes.GTXXSHARDCOST)
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 12345 + 888888,
)
# Add b3 and make sure the last x-shard tx is applied
b3 = state0.create_block_to_mine(xshard_gas_limit=opcodes.GTXXSHARDCOST)
state0.finalize_and_add_block(b3, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000 + 12345 + 888888 + 111111,
)
def test_xshard_rootblock_coinbase(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=16)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
env1 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=16)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.add_minor_block_header(state1.header_tip)
.finalize()
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
# Create a root block containing the block with the x-shard tx
root_block = state0.root_tip.create_block_to_append().finalize(
coinbase_tokens={env0.quark_chain_config.genesis_token: 1000000},
coinbase_address=acc1,
)
state0.add_root_block(root_block)
state1.add_root_block(root_block)
# Add b2 and make sure the root-block coinbase is credited on shard 0
b2 = state0.create_block_to_mine(xshard_gas_limit=opcodes.GTXXSHARDCOST)
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state0.get_token_balance(acc1.recipient, self.genesis_token),
10000000 + 1000000,
)
# Add b3 on shard 16; the coinbase was credited on shard 0, so the balance here is unchanged
b3 = state1.create_block_to_mine(xshard_gas_limit=opcodes.GTXXSHARDCOST)
state1.finalize_and_add_block(b3, xshard_gas_limit=opcodes.GTXXSHARDCOST)
# Root block coinbase does not consume xshard gas
self.assertEqual(
state1.get_token_balance(acc1.recipient, self.genesis_token), 10000000
)
def test_xshard_smart_contract(self):
pass
def test_xshard_sender_gas_limit(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=16)
acc3 = Address.create_random_account(full_shard_key=0)
env0 = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=64
)
state0 = create_default_shard_state(env=env0, shard_id=0)
# Add a root block to allow later minor blocks referencing this root block to
# be broadcasted
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(state0.header_tip)
.finalize()
)
state0.add_root_block(root_block)
b0 = state0.get_tip().create_block_to_append()
b0.header.hash_prev_root_block = root_block.header.get_hash()
tx0 = create_transfer_transaction(
shard_state=state0,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=888888,
gas=b0.meta.evm_xshard_gas_limit + 1,
gas_price=1,
)
self.assertFalse(state0.add_tx(tx0))
b0.add_tx(tx0)
with self.assertRaisesRegexp(
RuntimeError, "xshard evm tx exceeds xshard gas limit"
):
state0.finalize_and_add_block(b0)
b2 = state0.create_block_to_mine(
xshard_gas_limit=opcodes.GTXCOST * 9, include_tx=False
)
b2.header.hash_prev_root_block = root_block.header.get_hash()
tx2 = create_transfer_transaction(
shard_state=state0,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=888888,
gas=opcodes.GTXCOST * 10,
gas_price=1,
)
self.assertFalse(state0.add_tx(tx2, xshard_gas_limit=opcodes.GTXCOST * 9))
b2.add_tx(tx2)
with self.assertRaisesRegexp(
RuntimeError, "xshard evm tx exceeds xshard gas limit"
):
state0.finalize_and_add_block(b2, xshard_gas_limit=opcodes.GTXCOST * 9)
b1 = state0.get_tip().create_block_to_append()
b1.header.hash_prev_root_block = root_block.header.get_hash()
tx1 = create_transfer_transaction(
shard_state=state0,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=888888,
gas=b1.meta.evm_xshard_gas_limit,
gas_price=1,
)
b1.add_tx(tx1)
state0.finalize_and_add_block(b1)
def test_fork_resolve(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
b0 = state.get_tip().create_block_to_append()
b1 = state.get_tip().create_block_to_append()
state.finalize_and_add_block(b0)
self.assertEqual(state.header_tip, b0.header)
# Fork happens; first come, first served
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b0.header)
# Longer fork happens, override existing one
b2 = b1.create_block_to_append()
state.finalize_and_add_block(b2)
self.assertEqual(state.header_tip, b2.header)
def test_root_chain_first_consensus(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env0 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
env1 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=1)
genesis = state0.header_tip
# Add one block and prepare a fork
b0 = state0.get_tip().create_block_to_append(address=acc1)
b2 = state0.get_tip().create_block_to_append(
address=Address.create_empty_account()
)
state0.finalize_and_add_block(b0)
state0.finalize_and_add_block(b2)
b1 = state1.get_tip().create_block_to_append()
evm_state = state1.run_block(b1)
b1.finalize(
evm_state=evm_state,
coinbase_amount_map=TokenBalanceMap(evm_state.block_fee_tokens),
)
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(genesis)
.add_minor_block_header(b0.header)
.add_minor_block_header(b1.header)
.finalize()
)
state0.add_root_block(root_block)
b00 = b0.create_block_to_append()
state0.finalize_and_add_block(b00)
self.assertEqual(state0.header_tip, b00.header)
# Create another fork that is much longer (however not confirmed by root_block)
b3 = b2.create_block_to_append()
state0.finalize_and_add_block(b3)
b4 = b3.create_block_to_append()
state0.finalize_and_add_block(b4)
self.assertGreater(b4.header.height, b00.header.height)
self.assertEqual(state0.header_tip, b00.header)
def test_shard_state_add_root_block(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env0 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
env1 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state0 = create_default_shard_state(env=env0, shard_id=0)
state1 = create_default_shard_state(env=env1, shard_id=1)
genesis = state0.header_tip
# Add one block and prepare a fork
b0 = state0.get_tip().create_block_to_append(address=acc1)
b2 = state0.get_tip().create_block_to_append(
address=Address.create_empty_account()
)
state0.finalize_and_add_block(b0)
state0.finalize_and_add_block(b2)
b1 = state1.get_tip().create_block_to_append()
evm_state = state1.run_block(b1)
b1.finalize(
evm_state=evm_state,
coinbase_amount_map=TokenBalanceMap(evm_state.block_fee_tokens),
)
# Add one empty root block
empty_root = state0.root_tip.create_block_to_append().finalize()
state0.add_root_block(empty_root)
root_block = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(genesis)
.add_minor_block_header(b0.header)
.add_minor_block_header(b1.header)
.finalize()
)
root_block1 = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(genesis)
.add_minor_block_header(b2.header)
.add_minor_block_header(b1.header)
.finalize()
)
state0.add_root_block(root_block)
b00 = b0.create_block_to_append()
state0.finalize_and_add_block(b00)
self.assertEqual(state0.header_tip, b00.header)
# Create another fork that is much longer (however not confirmed by root_block)
b3 = b2.create_block_to_append()
state0.finalize_and_add_block(b3)
b4 = b3.create_block_to_append()
state0.finalize_and_add_block(b4)
self.assertEqual(state0.header_tip, b00.header)
self.assertEqual(state0.db.get_minor_block_by_height(2), b00)
self.assertIsNone(state0.db.get_minor_block_by_height(3))
b5 = b1.create_block_to_append()
self.assertFalse(state0.add_root_block(root_block1))
# Add one empty root block
empty_root = root_block1.create_block_to_append().finalize()
state0.add_root_block(empty_root)
root_block2 = (
empty_root.create_block_to_append()
.add_minor_block_header(b3.header)
.add_minor_block_header(b4.header)
.add_minor_block_header(b5.header)
.finalize()
)
self.assertTrue(state0.add_root_block(root_block2))
self.assertEqual(state0.header_tip, b4.header)
self.assertEqual(state0.meta_tip, b4.meta)
self.assertEqual(state0.root_tip, root_block2.header)
self.assertEqual(state0.db.get_minor_block_by_height(2), b3)
self.assertEqual(state0.db.get_minor_block_by_height(3), b4)
def test_shard_reorg_by_adding_root_block(self):
id1 = Identity.create_random_identity()
id2 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id2, full_shard_key=0)
env0 = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state0 = create_default_shard_state(env=env0, shard_id=0)
genesis = state0.header_tip
# Add one block and include it in the root block
b0 = state0.get_tip().create_block_to_append(address=acc1)
b1 = state0.get_tip().create_block_to_append(address=acc2)
root_block0 = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(genesis)
.add_minor_block_header(b0.header)
.finalize()
)
root_block1 = (
state0.root_tip.create_block_to_append()
.add_minor_block_header(genesis)
.add_minor_block_header(b1.header)
.finalize()
)
state0.finalize_and_add_block(b0)
state0.add_root_block(root_block0)
self.assertEqual(state0.header_tip, b0.header)
state0.finalize_and_add_block(b1)
self.assertEqual(state0.header_tip, b0.header)
# Add another root block with higher TD
root_block1.header.total_difficulty += root_block1.header.difficulty
root_block1.header.difficulty *= 2
self.assertTrue(state0.add_root_block(root_block1))
self.assertEqual(state0.header_tip, b1.header)
self.assertEqual(state0.meta_tip, b1.meta)
self.assertEqual(state0.root_tip, root_block1.header)
self.assertEqual(state0.evm_state.trie.root_hash, b1.meta.hash_evm_state_root)
def test_shard_state_add_root_block_too_many_minor_blocks(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(
genesis_account=acc1, genesis_minor_quarkash=10000000, shard_size=1
)
state = create_default_shard_state(env=env, shard_id=0)
max_mblock_in_rblock = state.shard_config.max_blocks_per_shard_in_one_root_block
headers = [state.header_tip]
for i in range(max_mblock_in_rblock):
b = state.get_tip().create_block_to_append(address=acc1)
state.finalize_and_add_block(b)
headers.append(b.header)
root_block = (
state.root_tip.create_block_to_append()
.extend_minor_block_header_list(headers)
.finalize()
)
# Too many blocks
with self.assertRaisesRegexp(
ValueError, "too many minor blocks in the root block"
):
state.add_root_block(root_block)
self.assertEqual(
state.get_unconfirmed_header_list(), headers[:max_mblock_in_rblock]
)
# exactly max_mblock_in_rblock blocks is okay
root_block.minor_block_header_list = headers[:max_mblock_in_rblock]
root_block.finalize()
state.add_root_block(root_block)
def test_shard_state_fork_resolve_with_higher_root_chain(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
b0 = state.get_tip() # genesis
root_block = (
state.root_tip.create_block_to_append()
.add_minor_block_header(b0.header)
.finalize()
)
self.assertEqual(state.header_tip, b0.header)
self.assertTrue(state.add_root_block(root_block))
b1 = state.get_tip().create_block_to_append()
b2 = state.get_tip().create_block_to_append(nonce=1)
b2.header.hash_prev_root_block = root_block.header.get_hash()
b3 = state.get_tip().create_block_to_append(nonce=2)
b3.header.hash_prev_root_block = root_block.header.get_hash()
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b1.header)
# Fork happens, although they have the same height, b2 survives since it confirms root block
state.finalize_and_add_block(b2)
self.assertEqual(state.header_tip, b2.header)
# b3 confirms the same root block as b2, so it will not override b2
state.finalize_and_add_block(b3)
self.assertEqual(state.header_tip, b2.header)
def test_shard_state_difficulty(self):
env = get_test_env()
for shard_config in env.quark_chain_config.shards.values():
shard_config.GENESIS.DIFFICULTY = 10000
env.quark_chain_config.SKIP_MINOR_DIFFICULTY_CHECK = False
diff_calc = EthDifficultyCalculator(cutoff=9, diff_factor=2048, minimum_diff=1)
env.quark_chain_config.NETWORK_ID = (
1
) # other network ids will skip difficulty check
state = create_default_shard_state(env=env, shard_id=0, diff_calc=diff_calc)
# Check new difficulty
b0 = state.create_block_to_mine(state.header_tip.create_time + 8)
self.assertEqual(
b0.header.difficulty,
state.header_tip.difficulty // 2048 + state.header_tip.difficulty,
)
b0 = state.create_block_to_mine(state.header_tip.create_time + 9)
self.assertEqual(b0.header.difficulty, state.header_tip.difficulty)
b0 = state.create_block_to_mine(state.header_tip.create_time + 17)
self.assertEqual(b0.header.difficulty, state.header_tip.difficulty)
b0 = state.create_block_to_mine(state.header_tip.create_time + 24)
self.assertEqual(
b0.header.difficulty,
state.header_tip.difficulty - state.header_tip.difficulty // 2048,
)
b0 = state.create_block_to_mine(state.header_tip.create_time + 35)
self.assertEqual(
b0.header.difficulty,
state.header_tip.difficulty - state.header_tip.difficulty // 2048 * 2,
)
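# The expected values above follow the Ethereum-style adjustment that
# EthDifficultyCalculator implements; a sketch with cutoff=9, diff_factor=2048:
#
#     def next_diff(parent_diff, dt, cutoff=9, factor=2048, minimum=1):
#         return max(minimum, parent_diff + parent_diff // factor * max(1 - dt // cutoff, -99))
#
#     assert next_diff(10000, 8) == 10000 + 10000 // 2048       # fast block -> harder
#     assert next_diff(10000, 9) == 10000                       # dt in [9, 17] -> unchanged
#     assert next_diff(10000, 24) == 10000 - 10000 // 2048      # one notch easier
#     assert next_diff(10000, 35) == 10000 - 10000 // 2048 * 2  # two notches easier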
def test_shard_state_recovery_from_root_block(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
block_headers = [state.header_tip]
block_meta = [state.meta_tip]
for i in range(12):
b = state.get_tip().create_block_to_append(address=acc1)
state.finalize_and_add_block(b)
block_headers.append(b.header)
block_meta.append(b.meta)
# add a fork
b1 = state.db.get_minor_block_by_height(3)
b1.header.create_time += 1
state.finalize_and_add_block(b1)
self.assertEqual(state.db.get_minor_block_by_hash(b1.header.get_hash()), b1)
root_block = state.root_tip.create_block_to_append()
root_block.minor_block_header_list = block_headers[:5]
root_block.finalize()
state.add_root_block(root_block)
recovered_state = ShardState(env=env, full_shard_id=2 | 0)
recovered_state.init_from_root_block(root_block)
self.assertEqual(
recovered_state.db.get_minor_block_by_hash(b1.header.get_hash()), b1
)
self.assertEqual(recovered_state.root_tip, root_block.header)
self.assertEqual(recovered_state.header_tip, block_headers[4])
self.assertEqual(recovered_state.confirmed_header_tip, block_headers[4])
self.assertEqual(recovered_state.meta_tip, block_meta[4])
self.assertEqual(
recovered_state.evm_state.trie.root_hash, block_meta[4].hash_evm_state_root
)
def test_shard_state_recovery_from_genesis(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
block_headers = [state.header_tip]
block_meta = [state.meta_tip]
for i in range(12):
b = state.get_tip().create_block_to_append(address=acc1)
state.finalize_and_add_block(b)
block_headers.append(b.header)
block_meta.append(b.meta)
# Add a few empty root blocks
root_block = None
for i in range(3):
root_block = state.root_tip.create_block_to_append()
root_block.finalize()
state.add_root_block(root_block)
recovered_state = ShardState(env=env, full_shard_id=2 | 0)
# expect to recover from genesis
recovered_state.init_from_root_block(root_block)
genesis = state.db.get_minor_block_by_height(0)
self.assertEqual(recovered_state.root_tip, root_block.header)
self.assertEqual(recovered_state.header_tip, genesis.header)
self.assertIsNone(recovered_state.confirmed_header_tip)
self.assertEqual(recovered_state.meta_tip, genesis.meta)
self.assertEqual(
recovered_state.evm_state.trie.root_hash, genesis.meta.hash_evm_state_root
)
def test_add_block_receipt_root_not_match(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
b1 = state.create_block_to_mine(address=acc3)
# Should succeed
state.finalize_and_add_block(b1)
evm_state = state.run_block(b1)
b1.finalize(
evm_state=evm_state, coinbase_amount_map=b1.header.coinbase_amount_map
)
b1.meta.hash_evm_receipt_root = bytes(32)
def test_not_update_tip_on_root_fork(self):
""" block's hash_prev_root_block must be on the same chain with root_tip to update tip.
+--+
a. |r1|
/+--+
/ |
+--+ / +--+ +--+
|r0|<----|m1|<---|m2| c.
+--+ \ +--+ +--+
\ | |
\+--+ |
b. |r2|<----+
+--+
Initial state: r0 <- m1
        Then adding r1, r2, m2 should not make m2 the tip, because m2 references r2, and
        r2 is not on the same root chain as the root tip r1.
"""
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
# m1 is the genesis block
m1 = state.db.get_minor_block_by_height(0)
r1 = state.root_tip.create_block_to_append()
r2 = state.root_tip.create_block_to_append()
r1.minor_block_header_list.append(m1.header)
r1.finalize()
state.add_root_block(r1)
r2.minor_block_header_list.append(m1.header)
r2.header.create_time = r1.header.create_time + 1 # make r2, r1 different
r2.finalize()
self.assertNotEqual(r1.header.get_hash(), r2.header.get_hash())
state.add_root_block(r2)
self.assertEqual(state.root_tip, r1.header)
m2 = m1.create_block_to_append(address=acc1)
m2.header.hash_prev_root_block = r2.header.get_hash()
state.finalize_and_add_block(m2)
# m2 is added
self.assertEqual(state.db.get_minor_block_by_hash(m2.header.get_hash()), m2)
# but m1 should still be the tip
self.assertEqual(state.header_tip, m1.header)
def test_add_root_block_revert_header_tip(self):
""" block's hash_prev_root_block must be on the same chain with root_tip to update tip.
+--+
|r1|<-------------+
/+--+ |
/ | |
+--+ / +--+ +--+ +--+
|r0|<----|m1|<---|m2| <---|m3|
+--+ \ +--+ +--+ +--+
| \ | \
| \+--+. +--+
| |r2|<-----|r3| (r3 includes m2)
| +--+ +--+
|
| +--+
+-----+|r4| (r4 includes m1)
+--+
Initial state: r0 <- m1 <- m2
Adding r1, r2, m3 makes r1 the root_tip, m3 the header_tip
Adding r3 should change the root_tip to r3, header_tip to m2
Adding r4 (greater total diff) will reset root_tip to r4, header_tip to m2
"""
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env, shard_id=0)
# m1 is the genesis block
m1 = state.db.get_minor_block_by_height(0)
m2 = state.get_tip().create_block_to_append(address=acc1)
state.finalize_and_add_block(m2)
r0 = state.root_tip
r1 = r0.create_block_to_append()
r2 = r0.create_block_to_append()
r1.minor_block_header_list.append(m1.header)
r1.finalize()
state.add_root_block(r1)
r2.minor_block_header_list.append(m1.header)
r2.header.create_time = r1.header.create_time + 1 # make r2, r1 different
r2.finalize()
self.assertNotEqual(r1.header.get_hash(), r2.header.get_hash())
state.add_root_block(r2)
self.assertEqual(state.root_tip, r1.header)
m3 = state.create_block_to_mine(address=acc1)
self.assertEqual(m3.header.hash_prev_root_block, r1.header.get_hash())
state.finalize_and_add_block(m3)
r3 = r2.create_block_to_append(address=acc1)
r3.add_minor_block_header(m2.header)
r3.finalize()
state.add_root_block(r3)
self.assertEqual(state.root_tip, r3.header)
self.assertEqual(state.header_tip, m2.header)
# greater total diff
r4 = r0.create_block_to_append(difficulty=r3.header.total_difficulty * 2)
r4.minor_block_header_list.append(m1.header)
r4.finalize()
state.add_root_block(r4)
self.assertEqual(state.root_tip, r4.header)
self.assertEqual(state.header_tip, m2.header)
def test_posw_fetch_previous_coinbase_address(self):
acc = Address.create_from_identity(
Identity.create_random_identity(), full_shard_key=0
)
env = get_test_env(genesis_account=acc, genesis_minor_quarkash=0)
posw_window_len = 2
state = create_default_shard_state(env=env, shard_id=0)
m = state.get_tip().create_block_to_append(address=acc)
coinbase_blockcnt = state._get_posw_coinbase_blockcnt(
m.header.hash_prev_minor_block, length=posw_window_len
)
self.assertEqual(len(coinbase_blockcnt), 1) # Genesis
state.finalize_and_add_block(m)
# Note PoSW window size is 2
prev_addr = None
for i in range(4):
random_acc = Address.create_random_account(full_shard_key=0)
m = state.get_tip().create_block_to_append(address=random_acc)
coinbase_blockcnt = state._get_posw_coinbase_blockcnt(
m.header.hash_prev_minor_block, length=posw_window_len
)
self.assertEqual(len(coinbase_blockcnt), 2)
# Count should all equal 1
self.assertEqual(len(set(coinbase_blockcnt.values())), 1)
self.assertEqual(list(coinbase_blockcnt.values())[0], 1)
if prev_addr: # Should always contain previous block's coinbase
self.assertTrue(prev_addr in coinbase_blockcnt)
state.finalize_and_add_block(m)
prev_addr = random_acc.recipient
        # Cache should now hold the expected entries
self.assertEqual(len(state.coinbase_addr_cache), 1)
self.assertEqual(len(state.coinbase_addr_cache[2]), 5)
def test_posw_coinbase_address_count_by_diff_length(self):
acc = Address.create_from_identity(
Identity.create_random_identity(), full_shard_key=0
)
env = get_test_env(genesis_account=acc, genesis_minor_quarkash=0)
state = create_default_shard_state(env=env, shard_id=0)
for i in range(4):
random_acc = Address.create_random_account(full_shard_key=0)
m = state.get_tip().create_block_to_append(address=random_acc)
state.finalize_and_add_block(m)
sum_cnt = lambda d: sum(d.values())
for length in range(1, 5):
coinbase_blockcnt = state._get_posw_coinbase_blockcnt(
m.header.get_hash(), length
)
self.assertEqual(sum_cnt(coinbase_blockcnt), length)
# Make sure internal cache state is correct
self.assertEqual(len(state.coinbase_addr_cache), 4)
def test_posw_coinbase_send_under_limit(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
id2 = Identity.create_random_identity()
acc2 = Address.create_from_identity(id2, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=0)
state = create_default_shard_state(env=env, shard_id=0, posw_override=True)
state.shard_config.COINBASE_AMOUNT = 8
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 2
state.shard_config.POSW_CONFIG.WINDOW_SIZE = 4
# Add a root block to have all the shards initialized, also include the genesis from
# another shard to allow x-shard tx TO that shard
root_block = state.root_tip.create_block_to_append()
root_block.add_minor_block_header(
create_default_shard_state(env=env, shard_id=1).header_tip
)
state.add_root_block(root_block.finalize())
m = state.get_tip().create_block_to_append(address=acc1)
state.finalize_and_add_block(m)
self.assertEqual(len(state.evm_state.sender_disallow_map), 2)
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT // 2, # tax rate is 0.5
)
self.assertEqual(
state.evm_state.sender_disallow_map, {bytes(20): 2, acc1.recipient: 2}
)
# Try to send money from that account
tx0 = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=Address.create_empty_account(0),
value=1,
gas=21000,
gas_price=0,
)
res = state.execute_tx(tx0, acc1)
self.assertIsNotNone(res, "tx should succeed")
        # Create a block including that tx
self.assertTrue(state.add_tx(tx0))
m = state.create_block_to_mine(address=acc2)
state.finalize_and_add_block(m)
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT // 2 - 1, # tax rate is 0.5
)
self.assertEqual(
state.evm_state.sender_disallow_map,
{bytes(20): 2, acc1.recipient: 2, acc2.recipient: 2},
)
tx1 = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=Address.create_empty_account(0),
value=2,
gas=21000,
gas_price=0,
)
res = state.execute_tx(tx1)
self.assertIsNone(res, "tx should fail")
# Create a block including that tx, receipt should also report error
self.assertTrue(state.add_tx(tx1))
m = state.create_block_to_mine(address=acc2)
state.finalize_and_add_block(m)
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT // 2 - 1, # tax rate is 0.5
)
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT, # tax rate is 0.5
)
self.assertEqual(
state.evm_state.sender_disallow_map, {acc1.recipient: 2, acc2.recipient: 4}
)
tx2 = create_transfer_transaction(
shard_state=state,
key=id2.get_key(),
from_address=acc2,
to_address=Address.create_empty_account(0),
value=5,
gas=21000,
gas_price=0,
)
res = state.execute_tx(tx2)
self.assertIsNone(res, "tx should fail")
tx3 = create_transfer_transaction(
shard_state=state,
key=id2.get_key(),
from_address=acc2,
to_address=Address.create_empty_account(0),
value=4,
gas=21000,
gas_price=0,
)
res = state.execute_tx(tx3, acc2)
self.assertIsNotNone(res, "tx should succeed")
def test_posw_coinbase_send_equal_locked(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=0)
state = create_default_shard_state(env=env, shard_id=0, posw_override=True)
state.shard_config.COINBASE_AMOUNT = 10
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 2
state.shard_config.POSW_CONFIG.WINDOW_SIZE = 4
# Add a root block to have all the shards initialized, also include the genesis from
# another shard to allow x-shard tx TO that shard
root_block = state.root_tip.create_block_to_append()
root_block.add_minor_block_header(
create_default_shard_state(env=env, shard_id=1).header_tip
)
state.add_root_block(root_block.finalize())
m = state.create_block_to_mine(address=acc1)
state.finalize_and_add_block(m)
self.assertEqual(len(state.evm_state.sender_disallow_map), 2)
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT // 2, # tax rate is 0.5
)
self.assertEqual(
state.evm_state.sender_disallow_map, {bytes(20): 2, acc1.recipient: 2}
)
# Try to send money from that account, the expected locked tokens are 4
tx0 = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=Address.create_empty_account(0),
value=1,
gas=21000,
gas_price=0,
)
state.tx_queue.add_transaction(tx0.tx.to_evm_tx())
m = state.create_block_to_mine(address=acc1)
state.finalize_and_add_block(m)
r = state.get_transaction_receipt(tx0.get_hash())
self.assertEqual(r[2].success, b"\x01") # Success
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT - 1,
)
def test_posw_coinbase_send_above_locked(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=0)
state = create_default_shard_state(env=env, shard_id=0, posw_override=True)
state.shard_config.COINBASE_AMOUNT = 10
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 2
state.shard_config.POSW_CONFIG.WINDOW_SIZE = 4
# Add a root block to have all the shards initialized, also include the genesis from
# another shard to allow x-shard tx TO that shard
root_block = state.root_tip.create_block_to_append()
root_block.add_minor_block_header(
create_default_shard_state(env=env, shard_id=1).header_tip
)
state.add_root_block(root_block.finalize())
m = state.create_block_to_mine(address=acc1)
state.finalize_and_add_block(m)
self.assertEqual(len(state.evm_state.sender_disallow_map), 2)
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT // 2, # tax rate is 0.5
)
self.assertEqual(
state.evm_state.sender_disallow_map, {bytes(20): 2, acc1.recipient: 2}
)
# Try to send money from that account, the expected locked tokens are 4
tx0 = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=Address.create_empty_account(0),
value=2,
gas=21000,
gas_price=0,
)
state.tx_queue.add_transaction(tx0.tx.to_evm_tx())
m = state.create_block_to_mine(address=acc1)
state.finalize_and_add_block(m)
r = state.get_transaction_receipt(tx0.get_hash())
self.assertEqual(r[2].success, b"") # Failure
self.assertEqual(
state.get_token_balance(acc1.recipient, self.genesis_token),
state.shard_config.COINBASE_AMOUNT, # tax rate is 0.5
)
def test_posw_validate_minor_block_seal(self):
acc = Address(b"\x01" * 20, full_shard_key=0)
env = get_test_env(genesis_account=acc, genesis_minor_quarkash=256)
state = create_default_shard_state(env=env, shard_id=0, posw_override=True)
# Force PoSW
state.shard_config.CONSENSUS_TYPE = ConsensusType.POW_DOUBLESHA256
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 1
state.shard_config.POSW_CONFIG.WINDOW_SIZE = 256
state.shard_config.POSW_CONFIG.DIFF_DIVIDER = 1000
self.assertEqual(
state.get_token_balance(acc.recipient, self.genesis_token), 256
)
genesis = Address(bytes(20), 0)
self.assertEqual(
state.get_token_balance(genesis.recipient, self.genesis_token), 0
)
# Genesis already has 1 block but zero stake, so no change to block diff
m = state.get_tip().create_block_to_append(address=genesis, difficulty=1000)
with self.assertRaises(ValueError):
state.finalize_and_add_block(m)
        # acc's stake (256) divided by TOTAL_STAKE_PER_BLOCK (1) covers 256 blocks,
        # so acc should pass the PoSW check no matter how many blocks it mined before
for _ in range(4):
for nonce in range(4): # Try different nonce
m = state.get_tip().create_block_to_append(
address=acc, difficulty=1000, nonce=nonce
)
state.validate_minor_block_seal(m)
state.finalize_and_add_block(m)
def test_posw_window_edge_cases(self):
acc = Address(b"\x01" * 20, full_shard_key=0)
env = get_test_env(genesis_account=acc, genesis_minor_quarkash=500)
state = create_default_shard_state(
env=env, shard_id=0, posw_override=True, no_coinbase=True
)
# Force PoSW
state.shard_config.CONSENSUS_TYPE = ConsensusType.POW_DOUBLESHA256
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 500
state.shard_config.POSW_CONFIG.WINDOW_SIZE = 2
state.shard_config.POSW_CONFIG.DIFF_DIVIDER = 1000
        # Use 0 to denote blocks mined by others, 1 for blocks mined by acc.
        # acc's stake covers exactly one PoSW block per window (500 / 500 = 1), and
        # the window is 0 <- [curr], so the current block should enjoy the diff adjustment
m = state.get_tip().create_block_to_append(address=acc, difficulty=1000)
state.finalize_and_add_block(m)
# Make sure stakes didn't change
self.assertEqual(
state.get_token_balance(acc.recipient, self.genesis_token), 500
)
# 0 <- 1 <- [curr], the window already has one block with PoSW benefit,
# mining new blocks should fail
m = state.get_tip().create_block_to_append(address=acc, difficulty=1000)
with self.assertRaises(ValueError):
state.finalize_and_add_block(m)
def test_incorrect_coinbase_amount(self):
env = get_test_env()
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
b = state.create_block_to_mine()
evm_state = state.run_block(b)
b.finalize(
evm_state=evm_state,
coinbase_amount_map=state.get_coinbase_amount_map(b.header.height),
)
state.add_block(b)
b = state.create_block_to_mine()
wrong_coinbase = state.get_coinbase_amount_map(b.header.height)
wrong_coinbase.add({self.genesis_token: +1})
b.finalize(evm_state=evm_state, coinbase_amount_map=wrong_coinbase)
with self.assertRaises(ValueError):
state.add_block(b)
def test_shard_coinbase_decay(self):
env = get_test_env()
state = create_default_shard_state(env=env)
coinbase = state.get_coinbase_amount_map(state.shard_config.EPOCH_INTERVAL)
self.assertEqual(
coinbase.balance_map,
{
env.quark_chain_config.genesis_token: state.shard_config.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
* env.quark_chain_config.REWARD_TAX_RATE
},
)
coinbase = state.get_coinbase_amount_map(state.shard_config.EPOCH_INTERVAL + 1)
self.assertEqual(
coinbase.balance_map,
{
env.quark_chain_config.genesis_token: state.shard_config.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
* env.quark_chain_config.REWARD_TAX_RATE
},
)
coinbase = state.get_coinbase_amount_map(state.shard_config.EPOCH_INTERVAL * 2)
self.assertEqual(
coinbase.balance_map,
{
env.quark_chain_config.genesis_token: state.shard_config.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR ** 2
* env.quark_chain_config.REWARD_TAX_RATE
},
)
def test_enable_tx_timestamp(self):
# whitelist acc1, make tx to acc2
# but do not whitelist acc2 and tx fails
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
id2 = Identity.create_random_identity()
acc2 = Address.create_from_identity(id2, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=5000000,
gas=50000,
)
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine()
self.assertEqual(len(b1.tx_list), 1)
env.quark_chain_config.ENABLE_TX_TIMESTAMP = b1.header.create_time + 100
env.quark_chain_config.TX_WHITELIST_SENDERS = [acc1.recipient.hex()]
b2 = state.create_block_to_mine()
self.assertEqual(len(b2.tx_list), 1)
state.finalize_and_add_block(b2)
tx2 = create_transfer_transaction(
shard_state=state,
key=id2.get_key(),
from_address=acc2,
to_address=acc3,
value=12345,
gas=50000,
)
env.quark_chain_config.ENABLE_TX_TIMESTAMP = None
self.assertTrue(state.add_tx(tx2))
b3 = state.create_block_to_mine()
self.assertEqual(len(b3.tx_list), 1)
env.quark_chain_config.ENABLE_TX_TIMESTAMP = b1.header.create_time + 100
b4 = state.create_block_to_mine()
self.assertEqual(len(b4.tx_list), 0)
        with self.assertRaisesRegex(
RuntimeError, "unwhitelisted senders not allowed before tx is enabled"
):
state.finalize_and_add_block(b3)
def test_enable_evm_timestamp_with_contract_create(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_contract_creation_transaction(
shard_state=state, key=id1.get_key(), from_address=acc1, to_full_shard_key=0
)
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine()
self.assertEqual(len(b1.tx_list), 1)
env.quark_chain_config.ENABLE_EVM_TIMESTAMP = b1.header.create_time + 100
b2 = state.create_block_to_mine()
self.assertEqual(len(b2.tx_list), 0)
        with self.assertRaisesRegex(
RuntimeError, "smart contract tx is not allowed before evm is enabled"
):
state.finalize_and_add_block(b1)
def test_enable_evm_timestamp_with_contract_call(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(genesis_account=acc1, genesis_minor_quarkash=10000000)
state = create_default_shard_state(env=env)
# Add a root block to have all the shards initialized
root_block = state.root_tip.create_block_to_append().finalize()
state.add_root_block(root_block)
tx = create_transfer_transaction(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_address=acc2,
value=12345,
gas=50000,
data=b"1234",
)
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine()
self.assertEqual(len(b1.tx_list), 1)
env.quark_chain_config.ENABLE_EVM_TIMESTAMP = b1.header.create_time + 100
b2 = state.create_block_to_mine()
self.assertEqual(len(b2.tx_list), 0)
        with self.assertRaisesRegex(
RuntimeError, "smart contract tx is not allowed before evm is enabled"
):
state.finalize_and_add_block(b1)
def test_failed_transaction_gas(self):
"""in-shard revert contract transaction validating the failed transaction gas used
"""
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=0)
env = get_test_env(
genesis_account=acc1,
genesis_minor_token_balances={self.genesis_token_str: 200 * 10 ** 18},
)
state = create_default_shard_state(env=env)
# Create failed contract with revert operation
contract_creation_with_revert_bytecode = (
"6080604052348015600f57600080fd5b50600080fdfe"
)
"""
pragma solidity ^0.5.1;
contract RevertContract {
constructor() public {
revert();
}
}
"""
        # This transaction cost was calculated with Remix; it differs from opcodes.GTXCOST because of the revert.
FAILED_TRANSACTION_COST = 54416
tx = contract_creation_tx(
shard_state=state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_key=acc1.full_shard_key,
bytecode=contract_creation_with_revert_bytecode,
gas_token_id=self.genesis_token,
transfer_token_id=self.genesis_token,
)
# Should succeed
self.assertTrue(state.add_tx(tx))
b1 = state.create_block_to_mine(address=acc2)
self.assertEqual(len(b1.tx_list), 1)
state.finalize_and_add_block(b1)
self.assertEqual(state.header_tip, b1.header)
# Check receipts and make sure the transaction is failed
self.assertEqual(len(state.evm_state.receipts), 1)
self.assertEqual(state.evm_state.receipts[0].state_root, b"")
self.assertEqual(state.evm_state.receipts[0].gas_used, FAILED_TRANSACTION_COST)
# Make sure the FAILED_TRANSACTION_COST is consumed by the sender
self.assertEqual(
state.get_token_balance(id1.recipient, self.genesis_token),
200 * 10 ** 18 - FAILED_TRANSACTION_COST,
)
# Make sure the accurate gas fee is obtained by the miner
self.assertEqual(
state.get_token_balance(acc2.recipient, self.genesis_token),
self.get_after_tax_reward(FAILED_TRANSACTION_COST + self.shard_coinbase),
)
self.assertEqual(
b1.header.coinbase_amount_map.balance_map,
{
env.quark_chain_config.genesis_token: self.get_after_tax_reward(
FAILED_TRANSACTION_COST + self.shard_coinbase
)
},
)
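Several of the PoSW tests above share one piece of arithmetic: each coinbase recipient inside the WINDOW_SIZE most recent blocks locks TOTAL_STAKE_PER_BLOCK per block it mined (the sender_disallow_map), and a transfer succeeds only if the sender's balance after the transfer still covers its locked amount. A standalone sketch of that check (the function names are illustrative, not QuarkChain's API):

def locked_amount(sender, sender_disallow_map):
    # sender_disallow_map maps recipient -> stake locked within the PoSW window
    return sender_disallow_map.get(sender, 0)

def transfer_allowed(balance, value, sender, sender_disallow_map):
    # The balance remaining after the transfer must still cover the lock
    return balance - value >= locked_amount(sender, sender_disallow_map)

# Mirrors test_posw_coinbase_send_under_limit: with balance 4 and 2 locked,
# sending 1 is allowed (4 - 1 >= 2); with balance 3 and 2 locked, sending 2
# is rejected (3 - 2 < 2).
assert transfer_allowed(4, 1, "acc1", {"acc1": 2})
assert not transfer_allowed(3, 2, "acc1", {"acc1": 2})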
| (quality-signal values for this record omitted) |

| 9c1075150e252d781f1bf9520197483078e4ba55 | 6,735 | py | Python | DA-ESS-CbResponse/bin/tab_splunktalib/conf_manager/data_input_endpoints.py | hawkdavis/cb-response-splunk-app | 120fe9d9a6a0d283cb7f91ff378ae33ba2f3cea6 | ["Apache-2.0"] | stars: null | issues: null | forks: null |
try:  # Python 3
    from urllib.parse import quote
except ImportError:  # Python 2
    from urllib import quote
import tab_splunktalib.common.xml_dom_parser as xdp
from tab_splunktalib.conf_manager.request import content_request
INPUT_ENDPOINT = "%s/servicesNS/%s/%s/data/inputs/%s"
def _input_endpoint_ns(uri, owner, app, input_type):
return INPUT_ENDPOINT % (uri, owner, app, input_type)
def reload_data_input(splunkd_uri,
session_key,
owner,
app_name,
input_type,
throw=False):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
"""
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
uri += "/_reload"
msg = "Failed to reload data input in app=%s: %s" % (app_name, input_type)
try:
content_request(uri, session_key, "GET", None, msg)
except Exception:
if throw:
raise
def create_data_input(splunkd_uri, session_key, owner, app_name, input_type,
name, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
:param name: The name of the input stanza to create.
i.e. stanza [<input_type>://<name>] will be created.
:param key_values: a K-V dict of details in the data input stanza.
    :return: None on success; raises an exception on failure
"""
key_values["name"] = name
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
msg = "Failed to create data input in app=%s: %s://%s" % (
app_name, input_type, name)
content_request(uri, session_key, "POST", key_values, msg)
def get_data_input(splunkd_uri,
session_key,
owner,
app_name,
input_type,
name=None):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
    :param name: The name of the input stanza to fetch.
                 i.e. stanza [<input_type>://<name>] will be returned.
    :return: a list of stanzas of the input type, including metadata
"""
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
if name:
        uri += quote("/" + name.replace("/", "%2F"))
# get all the stanzas at one time
uri += "?count=0&offset=0"
msg = "Failed to get data input in app=%s: %s://%s" % (
app_name, input_type, name)
content = content_request(uri, session_key, "GET", None, msg)
return xdp.parse_conf_xml_dom(content)
def update_data_input(splunkd_uri, session_key, owner, app_name, input_type,
name, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
    :param name: The name of the input stanza to update.
i.e. stanza [<input_type>://<name>] will be updated.
:param key_values: a K-V dict of details in the data input stanza.
    :return: None on success; raises an exception on failure
"""
if "name" in key_values:
del key_values["name"]
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
    uri += quote("/" + name.replace("/", "%2F"))
msg = "Failed to update data input in app=%s: %s://%s" % (
app_name, input_type, name)
content_request(uri, session_key, "POST", key_values, msg)
def delete_data_input(splunkd_uri, session_key, owner, app_name, input_type,
name):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
    :param name: The name of the input stanza to delete.
                 i.e. stanza [<input_type>://<name>] will be deleted.
    :return: None on success; raises an exception on failure
"""
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
    uri += quote("/" + name.replace("/", "%2F"))
msg = "Failed to delete data input in app=%s: %s://%s" % (
app_name, input_type, name)
content_request(uri, session_key, "DELETE", None, msg)
def operate_data_input(splunkd_uri, session_key, owner, app_name, input_type,
name, operation):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
:param owner: the owner (ACL user), e.g. "-", "nobody"
:param app_name: the app"s name, e.g. "Splunk_TA_aws"
:param input_type: name of the input type.
if it is a script input, the input is "script",
for modinput, say snow, the input is "snow"
    :param name: The name of the input stanza to operate on.
                 i.e. stanza [<input_type>://<name>] will be enabled or disabled.
:param operation: must be "disable" or "enable"
"""
assert operation in ("disable", "enable")
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
uri += "/%s/%s" % (urllib.quote(name.replace("/", "%2F")), operation)
msg = "Failed to %s data input in app=%s: %s://%s" % (
operation, app_name, input_type, name)
content_request(uri, session_key, "POST", None, msg)
| (quality-signal values for this record omitted) |

| 9c1583ee64efa2f240f83ba11c5e66f78c6d2ac0 | 16,940 | py | Python | ui_automation_core/helpers/actions/mouse_action.py | Harshavardhanchowdary/python-ui-testing-automation | a624c6b945276c05722be2919d95aa9e5539d0d0 | ["MIT"] | stars: null | issues: null | forks: null |
from enum import Enum, auto
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webelement import WebElement
from ui_automation_core.helpers import js_executor
from ui_automation_core.helpers.web_element.locator import Locator
from ui_automation_core.helpers.web_element.wait_states import ElementWaitState
class ClickMethod(Enum):
API_CLICK = auto()
ACTION_CHAIN_CLICK = auto()
JAVA_SCRIPT_CLICK = auto()
class MouseAction:
"""
    MouseAction is a collection of mouse actions that can be performed on a web element.
"""
def __init__(self, context):
self.context = context
def click_web_element(self, locator=None, click_method=ClickMethod.API_CLICK,
wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Simulates user clicking on an element with different click methods available.
        :param locator: Web element or a locator string on which the click action needs to be performed
:param click_method: Method to perform click and by default click_method=ClickMethod.API_CLICK
Available methods are:
API_CLICK
JAVA_SCRIPT_CLICK
ACTION_CHAIN_CLICK
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
if not isinstance(click_method, ClickMethod):
raise TypeError(f'`{click_method}` must be an instance of ClickMethod.')
if locator is None:
raise ValueError('Please provide the string pattern or a web element to perform a click')
if isinstance(locator, WebElement):
element, element_to_log = locator, locator.get_attribute('outerHTML')
else:
element, element_to_log = Locator(self.context).get_element(locator, wait_state, True, timeout), locator
if click_method is ClickMethod.API_CLICK:
element.click()
self.context.logger.info(
f'Successfully clicked on the element {element_to_log}')
if click_method is ClickMethod.JAVA_SCRIPT_CLICK:
js_executor.execute_javascript('arguments[0].click();', element)
self.context.logger.info(
f'Successfully clicked on the element {element_to_log}')
if click_method is ClickMethod.ACTION_CHAIN_CLICK:
ActionChains(self.context.driver).click(element).perform()
self.context.logger.info(
f'Successfully clicked on the element {element_to_log}')
return self
        except TypeError:
            self.context.logger.error(f'`{click_method}` must be an instance of ClickMethod')
            raise
        except ValueError:
            self.context.logger.error('String pattern is None. Please provide a valid pattern to locate the element.')
            raise
except Exception as ex:
self.context.logger.error(
f'Unable to click on the element `{element_to_log}`.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to click on the element `{element_to_log}`. Error: {ex}')
def double_click(self, locator=None, wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Double-clicks an element.
        :param locator: Web element or a locator string on which the click action needs to be performed
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
if locator is None:
raise ValueError('Please provide the string pattern or a web element to perform a double click.')
if isinstance(locator, WebElement):
element, element_to_log = locator, locator.get_attribute('outerHTML')
else:
                element, element_to_log = Locator(self.context).get_element(locator, wait_state, True, timeout), locator
ActionChains(self.context.driver).double_click(element).perform()
self.context.logger.info(
f'Successfully double clicked on element {element_to_log}')
return self
except ValueError:
self.context.logger.error(
'String pattern is None. Please provide a valid pattern to locate the element and perform a '
'click action.')
            raise
except Exception as ex:
self.context.logger.error(f'Unable to double click on element {element_to_log}.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to double click on element {element_to_log}. Error: {ex}')
def context_click(self, locator=None, wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Right-click on the given element.
        :param locator: Web element or a locator string on which the click action needs to be performed
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
if locator is None:
raise ValueError('Please provide the string pattern or a web element to perform a right click.')
if isinstance(locator, WebElement):
element, element_to_log = locator, locator.get_attribute('outerHTML')
else:
                element, element_to_log = Locator(self.context).get_element(locator, wait_state, True, timeout), locator
ActionChains(self.context.driver).context_click(element).perform()
self.context.logger.info(
f'Successfully right clicked on element {element_to_log}')
return self
except ValueError:
self.context.logger.error(
'String pattern is None. Please provide a valid pattern to locate the element and perform a '
'right click action.')
            raise
except Exception as ex:
self.context.logger.error(f'Unable to right click on element {element_to_log}.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to right click on element {element_to_log}. Error: {ex}')
def move_cursor_to_element(self, locator, wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Simulate users hovering a mouse over the given element.
        :param locator: Web element or a locator string on which the hover action needs to be performed
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
if locator is None:
raise ValueError('Please provide the string pattern or a web element to perform an action.')
if isinstance(locator, WebElement):
element, element_to_log = locator, locator.get_attribute('outerHTML')
else:
                element, element_to_log = Locator(self.context).get_element(locator, wait_state, True, timeout), locator
ActionChains(self.context.driver).move_to_element(element).perform()
self.context.logger.info(
f'Successfully moved the cursor on to the element {element_to_log}')
return self
except ValueError:
self.context.logger.error(
'String pattern is None. Please provide a valid pattern to locate the element and perform an '
'action.')
            raise
except Exception as ex:
self.context.logger.error(f'Unable to move the cursor to the element {element_to_log}.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to move to the element {element_to_log}. Error: {ex}')
def move_cursor_by_offset(self, x_offset, y_offset):
"""
Moving the mouse to an offset from current mouse position.
:param x_offset: X offset to move to, as a positive or negative integer.
:param y_offset: Y offset to move to, as a positive or negative integer.
:return: self
"""
try:
ActionChains(self.context.driver).move_by_offset(
x_offset, y_offset).perform()
self.context.logger.info(
f'Successfully moved by offset {x_offset, y_offset}')
return self
except Exception as ex:
self.context.logger.error(f'Unable to move by offset {x_offset, y_offset}.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to move by offset {x_offset, y_offset}. Error: {ex}')
def move_cursor_to_element_by_offset(self, locator, x_offset, y_offset,
wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Simulate users hovering a mouse over the given element with the relative position (x, y)
from the top-left corner of that element.
        :param locator: Web element or a locator string on which the action needs to be performed
:param x_offset: X offset to move to, as a positive or negative integer.
:param y_offset: Y offset to move to, as a positive or negative integer.
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
element, element_to_log = (locator, locator.get_attribute('outerHTML')) \
if isinstance(locator, WebElement) \
else (Locator(self.context).get_element(locator, wait_state, True, timeout), locator)
(ActionChains(self.context.driver).move_to_element_with_offset(element, x_offset, y_offset).perform())
self.context.logger.info(f'Successfully moved mouse pointer by an offset {x_offset, y_offset} '
f'on the element {element_to_log}')
return self
except Exception as ex:
self.context.logger.error(f'Unable to move by an offset {x_offset, y_offset} '
f'on the element {element_to_log}')
self.context.logger.exception(ex)
raise Exception(f'Unable to move by an offset {x_offset, y_offset} to the '
f'element {element_to_log}. Error: {ex}')
def drag_and_drop_to_object(self, source, target, wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Drag an object and drop it onto another object. Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
        :param source: The element to mouse down (the element to be moved). Can be a locator string or a web element.
        :param target: The element to mouse up (the destination). Can be a locator string or a web element.
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
trg_element = None
trg_element_to_log = None
src_element_to_log = None
try:
if source is None:
raise ValueError(
'Please provide the `source` string pattern or a web element to perform drag and drop.')
if target is None:
raise ValueError(
'Please provide the `target` string pattern or a web element to perform a drag and drop.')
if isinstance(source, WebElement):
src_element, src_element_to_log = source, source.get_attribute('outerHTML')
else:
src_element, src_element_to_log = Locator(self.context).get_element(source, wait_state, True,
timeout), source
            if isinstance(target, WebElement):
                # Fixed: this previously assigned src_element, leaving trg_element as None.
                trg_element, trg_element_to_log = target, target.get_attribute('outerHTML')
else:
trg_element, trg_element_to_log = Locator(self.context).get_element(target, wait_state, True,
timeout), target
(ActionChains(self.context.driver).drag_and_drop(src_element, trg_element).perform())
self.context.logger.info(f'Successfully dragged from the source element '
f'{src_element_to_log} and dropped onto target element {trg_element_to_log}')
return self
except ValueError:
self.context.logger.error(
f'Locator pattern is None. Please provide a valid {"`source`" if source is None else "`target`"}'
f' pattern to locate the element and perform a drag and drop operation.')
            raise
except Exception as ex:
self.context.logger.error(f'Unable to drag and drop on elements {src_element_to_log} '
f'and {trg_element_to_log}.')
self.context.logger.exception(ex)
raise Exception(f'Unable to drag and drop on elements {src_element_to_log} '
f'and {trg_element_to_log}. Error: {ex}')
def drag_and_drop_by_offset(self, src_locator, x_offset, y_offset,
wait_state=ElementWaitState.PRESENT, timeout=None):
"""
Drag an object and drop it to an offset location. Holds down the left mouse button on the source element,
then moves to the target offset and releases the mouse button.
        :param src_locator: The element to mouse down (the element to be moved). Can be a locator string or a web element.
:param x_offset: X offset to move to
:param y_offset: Y offset to move to.
        :param wait_state: The wait state for retries. Choose a state from the ElementWaitState class.
:param timeout: wait time before throwing any exception.
If None, timeout defaults to 20 seconds.
:return: self
"""
element_to_log = None
try:
if src_locator is None:
raise ValueError(
'Please provide the `source` string pattern or a web element to perform drag and drop.')
if isinstance(src_locator, WebElement):
element, element_to_log = src_locator, src_locator.get_attribute('outerHTML')
else:
element, element_to_log = Locator(self.context).get_element(src_locator, wait_state, True, timeout), \
src_locator
(ActionChains(self.context.driver)
.drag_and_drop_by_offset(element, x_offset, y_offset).perform())
self.context.logger.info(
f'Successfully moved the source element {element_to_log} by an offset {x_offset, y_offset}')
return self
except ValueError:
self.context.logger.error(
f'Locator pattern is None. Please provide a valid `source`'
f' pattern to locate the element and perform a drag and drop operation.')
            raise
except Exception as ex:
self.context.logger.error(
f'Unable to move the source element {element_to_log} by an offset {x_offset, y_offset}.')
self.context.logger.exception(ex)
raise Exception(
f'Unable to move the source element {element_to_log} by an offset {x_offset, y_offset}. Error: {ex}')
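A minimal usage sketch, assuming a test-framework `context` object that exposes the Selenium `driver` and a `logger` as the class expects; the locator strings are illustrative. Every method returns `self`, so calls can be chained:

mouse = MouseAction(context)  # `context` must provide .driver and .logger
(mouse
 .click_web_element("id=submit", click_method=ClickMethod.ACTION_CHAIN_CLICK)
 .move_cursor_to_element("id=menu")
 .drag_and_drop_by_offset("id=slider", x_offset=40, y_offset=0))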
| (quality-signal values for this record omitted) |

| 9c3e24b42abf1e4f5519bd685437d976c8ec34a9 | 263 | py | Python | reminders/tests.py | BelikovYoav/Beyond-07-team-3 | 407efd58ed5d98fd0862601d792c0415464b45cc | ["MIT"] | stars: null | issues: 8 (2022-02-28T17:05:35.000Z to 2022-03-06T22:53:54.000Z) | forks: 5 (2022-02-28T13:45:11.000Z to 2022-03-06T15:26:54.000Z) |
from .class_tests.create_reminder_tests import * # noqa: F403 F401
from .class_tests.update_reminder_tests import * # noqa: F403 F401
from .class_tests.reminders_tests import * # noqa: F403 F401
from .class_tests.notification_tests import * # noqa: F403 F401
| (quality-signal values for this record omitted) |

| 92d618be3c0c6a273503ff2d722e169ff3a4ee45 | 30 | py | Python | __init__.py | falgon/pelican_dynamic | 611ed1666fc4014cc1ffee71ec2f18af399348f0 | ["MIT"] | stars: null | issues: null | forks: null |
from .pelican_dynamic import *
| (quality-signal values for this record omitted) |

| 92d70b52ce2c89029477617e1ff993d157cc2e0e | 1,465 | py | Python | tests.py | plumdog/myhome | cc829fc5c76128adffb1049683194f16f18bb3a8 | ["MIT"] | stars: null | issues: null | forks: null |
from datetime import datetime
from dateutil.tz import tzutc
from backend import get_post
def test_backend_parsing():
content = '''Title: Example title
Subtitle: Example subtitle
Tags: tag1, tag2
Datetime: 2016-02-07 15:25:30+00:00
Live: True
Content:
Content here
'''
post = get_post(content.splitlines())
assert post.title == 'Example title'
assert post.subtitle == 'Example subtitle'
assert post.tags == ['tag1', 'tag2']
assert post.datetime == datetime(2016, 2, 7, 15, 25, 30, tzinfo=tzutc())
def test_backend_parsing_no_tags():
content = '''Title: Example title
Subtitle: Example subtitle
Datetime: 2016-02-07 15:25:30+00:00
Content:
Content here
'''
post = get_post(content.splitlines())
assert post.title == 'Example title'
assert post.subtitle == 'Example subtitle'
assert not post.tags
assert post.datetime == datetime(2016, 2, 7, 15, 25, 30, tzinfo=tzutc())
assert post.content.strip() == 'Content here'
def test_backend_parsing_multiline_content():
content = '''Title: Example title
Subtitle: Example subtitle
Datetime: 2016-02-07 15:25:30+00:00
Content:
Content here
more here
'''
post = get_post(content.splitlines())
assert post.title == 'Example title'
assert post.subtitle == 'Example subtitle'
assert not post.tags
assert post.datetime == datetime(2016, 2, 7, 15, 25, 30, tzinfo=tzutc())
assert post.content.strip() == 'Content here\n\nmore here'
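The `backend.get_post` implementation under test is not shown; a minimal sketch consistent with what these tests assert might look like the following (the `Post` container and the exact field handling are assumptions):

from collections import namedtuple
from dateutil.parser import parse as parse_datetime

Post = namedtuple('Post', 'title subtitle tags datetime live content')

def get_post_sketch(lines):
    """Parse 'Key: value' headers until 'Content:', then keep the rest as body."""
    headers, body, in_content = {}, [], False
    for line in lines:
        if in_content:
            body.append(line)
        elif line.startswith('Content:'):
            in_content = True
        elif line.strip():
            key, _, value = line.partition(':')
            headers[key.strip().lower()] = value.strip()
    tags = [t.strip() for t in headers['tags'].split(',')] if 'tags' in headers else []
    return Post(
        title=headers.get('title'),
        subtitle=headers.get('subtitle'),
        tags=tags,
        datetime=parse_datetime(headers['datetime']),
        live=headers.get('live') == 'True',
        content='\n'.join(body),
    )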
| (quality-signal values for this record omitted) |

| 138f970acbed1792052f8a985bddc53915515141 | 1,369 | py | Python | flask__webservers/cookies/test.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | ["CC-BY-4.0"] | stars: 117 (2015-12-18T07:18:27.000Z to 2022-03-28T00:25:54.000Z) | issues: 8 (2018-10-03T09:38:46.000Z to 2021-12-13T19:51:09.000Z) | forks: 28 (2016-08-02T17:43:47.000Z to 2022-03-21T08:31:12.000Z) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
session = requests.Session()
rs = session.get('http://127.0.0.1:5001/get-cookies')
print(rs, rs.url)
print(rs.headers)
print(rs.cookies)
print(rs.json())
"""
<Response [200]> http://127.0.0.1:5001/get-cookies
{'Content-Type': 'application/json', 'Content-Length': '3', 'Server': 'Werkzeug/0.15.4 Python/3.7.3', 'Date': 'Wed, 24 Feb 2021 13:28:00 GMT'}
<RequestsCookieJar[]>
{}
"""
print()
rs = session.post('http://127.0.0.1:5001/set-cookies', params=dict(a=123, b=3))
print(rs, rs.url)
print(rs.headers)
print(rs.cookies)
print(rs.json())
"""
<Response [200]> http://127.0.0.1:5001/set-cookies?a=123&b=3
{'Content-Type': 'application/json', 'Content-Length': '17', 'Set-Cookie': 'a=123; Path=/, b=3; Path=/', 'Server': 'Werkzeug/0.15.4 Python/3.7.3', 'Date': 'Wed, 24 Feb 2021 13:28:00 GMT'}
<RequestsCookieJar[<Cookie a=123 for 127.0.0.1/>, <Cookie b=3 for 127.0.0.1/>]>
{'ok': True}
"""
print()
rs = session.get('http://127.0.0.1:5001/get-cookies')
print(rs, rs.url)
print(rs.headers)
print(rs.cookies)
print(rs.json())
"""
<Response [200]> http://127.0.0.1:5001/get-cookies
{'Content-Type': 'application/json', 'Content-Length': '30', 'Server': 'Werkzeug/0.15.4 Python/3.7.3', 'Date': 'Wed, 24 Feb 2021 13:28:00 GMT'}
<RequestsCookieJar[]>
{'a': '123', 'b': '3'}
"""
| (quality-signal values for this record omitted) |

| 13d50af8fe68859515ba6b641348e627b67c9cb0 | 111 | py | Python | ld35/resources.py | seventhroot/ld35 | 0bdf3269b3b3a7a884d95c6bae0b1776509c2387 | ["MIT"] | stars: null | issues: null | forks: null |
from pkg_resources import resource_filename
def get(filename):
return resource_filename('ld35', filename)
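A one-line usage example; the asset path is hypothetical:

sprite_path = get('images/player.png')  # hypothetical packaged asset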
| (quality-signal values for this record omitted) |

| b92a55fb56c74ee48e73cf980c210d96ab7b524f | 29 | py | Python | cogs/memes/__init__.py | TCastus/ASTUSbot | 348af14a2099e0eb2d69b0502d4c562bc88c72c4 | ["MIT"] | stars: 4 (2020-06-28T02:30:55.000Z to 2021-03-22T10:44:26.000Z) | issues: 23 (2020-06-28T01:24:56.000Z to 2021-09-22T14:13:30.000Z) | forks: 3 (2020-11-09T12:55:27.000Z to 2020-12-03T12:00:39.000Z) |
from .cog_meme import CogMeme
| (quality-signal values for this record omitted) |

| b9545c85158e61d3c849a79fc50debed37cfe984 | 119 | py | Python | timetoken/__init__.py | mpavelka/python-timetoken | 6248e438934ffc11e8be5c027bd9bef00a22dab6 | ["BSD-3-Clause"] | stars: null | issues: 2 (2018-08-14T19:04:28.000Z to 2018-08-14T19:05:19.000Z) | forks: 1 (2021-02-08T16:31:53.000Z) |
from .timetoken import TimeToken, TimeTokenException, TimeTokenExpired, InvalidTimeTokenSignature, TimeTokenParseError
| (quality-signal values for this record omitted) |

| b961bfa0b9e646960be2e2b6bf315fc51315c507 | 64,034 | py | Python | main/solutions/find_all_anagrams_in_a_string.py | techrabbit58/LeetCode30DaysMay2020Challenge | 3798c5ce104e806372922a73b5ba66b29fc51dbd | ["Unlicense"] | stars: 1 (2020-06-10T10:28:44.000Z) | issues: null | forks: null |
"""
Week 3, Day 3: Find All Anagrams in a String
Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
Strings consist of lowercase English letters only, and the length of both strings s and p will not be larger than 20,100.
The order of output does not matter.
Examples
Input:
s: "cbaebabacd" p: "abc"
Output:
[0, 6]
Explanation:
The substring with start index = 0 is "cba", which is an anagram of "abc".
The substring with start index = 6 is "bac", which is an anagram of "abc".
---
Input:
s: "abab" p: "ab"
Output:
[0, 1, 2]
Explanation:
The substring with start index = 0 is "ab", which is an anagram of "ab".
The substring with start index = 1 is "ba", which is an anagram of "ab".
The substring with start index = 2 is "ab", which is an anagram of "ab".
---
"""
from collections import Counter
from typing import List
from itertools import islice
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
"""Slow."""
s = list(s)
p = sorted(list(p))
return [j for j in range(len(s) - len(p) + 1) if sorted(s[j:j + len(p)]) == p]
class SolutionV2:
"""Very slow."""
def findAnagrams(self, s: str, p: str) -> List[int]:
starts = [k for k in range(len(s) - len(p) + 1) if s[k] in p]
m = Counter(p)
result = []
for j in starts:
q = Counter(islice(s, j, j + len(p)))
if not (q - m):
result.append(j)
return result
class SolutionV3:
"""
    I picked this solution from the discussion. It was contributed by Junaid
    Mansuri. It is clever and fast: O(n), and two orders of magnitude faster
    than the first approach. Great!
"""
def findAnagrams(self, s: str, p: str) -> List[int]:
"""
(1) Handle the corner case 'p is longer than s' by a guard clause.
(2) Accumulate the hash values for s and p for the length of p at the same time.
(3) Handle the common case that the first len(p) characters are already an anagram of p.
(4) For the rest of s, walk through the sliding hash sum, index by index.
(4.1) If there is a match in that 'sliding window' over s, append the start index i to the result.
:param s: a string
:param p: another string, of which anagrams shall be located in s
:return: a list of indexes giving all start points of anagrams of p in s
"""
s_length, p_length, s_hash, p_hash, result = len(s), len(p), 0, 0, []
if p_length > s_length:
return []
for k in range(p_length):
s_hash, p_hash = s_hash + hash(s[k]), p_hash + hash(p[k])
if s_hash == p_hash:
result.append(0)
for k in range(p_length, s_length):
s_hash += hash(s[k]) - hash(s[k - p_length])
if s_hash == p_hash:
result.append(k - p_length + 1)
return result
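# Caveat: SolutionV3 compares *sums* of per-character hashes, so a non-anagram
# window could in principle collide with p's hash sum. Collisions are
# vanishingly unlikely in practice; a strict variant would confirm a match
# (e.g. Counter(s[k - p_length + 1:k + 1]) == Counter(p)) before appending
# the index.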
if __name__ == '__main__':
    obj = SolutionV3()

    example = 'cbaebabacd'
    probe = 'abc'
    expected = [0, 6]
    print('Example: ', obj.findAnagrams(example, probe), '\nExpected:', expected, '\n')

    example = 'abab'
    probe = 'ab'
    expected = [0, 1, 2]
    print('Example: ', obj.findAnagrams(example, probe), '\nExpected:', expected, '\n')

    example = 'ababababab'
    probe = 'aab'
    expected = [0, 2, 4, 6]
    print('Example: ', obj.findAnagrams(example, probe), '\nExpected:', expected, '\n')
    # Stress test: 10,000 a's, a single b, then 10,000 more a's, probed with
    # 10,000 a's; only the two all-'a' runs of exactly probe length match.
    example = 'a' * 10000 + 'b' + 'a' * 10000
    probe = 'a' * 10000
    expected = [0, 10001]
    print('Example: ', obj.findAnagrams(example, probe), '\nExpected:', expected, '\n')
    # Stress test: the alphabet repeated 773 times, probed with 386 repetitions;
    # since the haystack is 26-periodic, every window of probe length matches.
    example = 'abcdefghijklmnopqrstuvwxyz' * 773
    probe = 'abcdefghijklmnopqrstuvwxyz' * 386
    expected = list(range(10063))
    print('Example: ', obj.findAnagrams(example, probe), '\nExpected:', expected, '\n')
# last line of code
[quality signals: avg_line_length 500.265625, max_line_length 20,108, alphanum_fraction 0.974654; remaining qsc_* metric columns elided]
hexsha: b9ef414657b07927b2a38ab2a073a4c6e7dab280 | size: 39,698 | ext: py | lang: Python
repo_path: nova/tests/api/openstack/test_server_actions.py | licenses: ["Apache-2.0"]
max_stars: xushiwei/nova @ f27956708b0aaeabb06125e6a72b4d61747934b7, 1 star (2021-11-08T10:11:44.000Z to 2021-11-08T10:11:44.000Z)
max_issues / max_forks: same repo and head; counts and event dates null
content:
import base64
import datetime
import json
import stubout
import webob
from nova import context
from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
from nova.compute import vm_states
from nova.compute import instance_types
import nova.db.api
from nova import test
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
def return_server_by_id(context, id):
return stub_instance(id)
def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
def return_server_with_attributes(**kwargs):
def _return_server(context, id):
return stub_instance(id, **kwargs)
return _return_server
def return_server_with_state(vm_state, task_state=None):
return return_server_with_attributes(vm_state=vm_state,
task_state=task_state)
def return_server_with_uuid_and_state(vm_state, task_state=None):
def _return_server(context, id):
return return_server_with_state(vm_state, task_state)
return _return_server
def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
name=None, vm_state=None, task_state=None):
if metadata is not None:
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
else:
metadata_items = [{'key':'seq', 'value':id}]
inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": "fake",
"project_id": "fake",
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": "",
"key_data": "",
"vm_state": vm_state or vm_states.ACTIVE,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
"hostname": "",
"host": "",
"instance_type": dict(inst_type),
"user_data": "",
"reservation_id": "",
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"availability_zone": "",
"display_name": name or "server%s" % id,
"display_description": "",
"locked": False,
"metadata": metadata_items,
"access_ip_v4": "",
"access_ip_v6": "",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"virtual_interfaces": [],
}
instance["fixed_ips"] = {
"address": '192.168.0.1',
"floating_ips": [],
}
return instance
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password
class ServerActionsTest(test.TestCase):
def setUp(self):
self.maxDiff = None
super(ServerActionsTest, self).setUp()
self.flags(verbose=True)
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_auth(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
self.webreq = common.webob_factory('/v1.0/servers')
def tearDown(self):
self.stubs.UnsetAll()
def test_server_change_password(self):
body = {'changePassword': {'adminPass': '1234pass'}}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
def test_server_change_password_xml(self):
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/xml'
req.body = '<changePassword adminPass="1234pass">'
# res = req.get_response(fakes.wsgi_app())
# self.assertEqual(res.status_int, 501)
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
def test_server_rebuild_accepted(self):
body = {
"rebuild": {
"imageId": 2,
},
}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(res.body, "")
def test_server_rebuild_rejected_when_building(self):
body = {
"rebuild": {
"imageId": 2,
},
}
state = vm_states.BUILDING
new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_server_rebuild_bad_entity(self):
body = {
"rebuild": {
},
}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
def test_resize_bad_flavor_fails(self):
req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
self.assertEqual(self.resize_called, False)
def test_resize_raises_fails(self):
req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
def resize_mock(*args):
raise Exception('hurr durr')
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 500)
def test_confirm_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
self.resize_called = False
def confirm_resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'confirm_resize',
confirm_resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
self.assertEqual(self.resize_called, True)
def test_confirm_resize_server_fails(self):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
def confirm_resize_mock(*args):
raise Exception('hurr durr')
self.stubs.Set(nova.compute.api.API, 'confirm_resize',
confirm_resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_revert_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(revertResize=None))
self.resize_called = False
def revert_resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'revert_resize',
revert_resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
def test_revert_resize_server_fails(self):
req = self.webreq('/1/action', 'POST', dict(revertResize=None))
def revert_resize_mock(*args):
raise Exception('hurr durr')
self.stubs.Set(nova.compute.api.API, 'revert_resize',
revert_resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_migrate_server(self):
"""This is basically the same as resize, only we provide the `migrate`
attribute in the body's dict.
"""
req = self.webreq('/1/migrate', 'POST')
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
def test_create_backup(self):
"""The happy path for creating backups"""
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(202, response.status_int)
self.assertTrue(response.headers['Location'])
def test_create_backup_admin_api_off(self):
"""The happy path for creating backups"""
self.flags(allow_admin_api=False)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_backup_with_metadata(self):
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(202, response.status_int)
self.assertTrue(response.headers['Location'])
def test_create_backup_with_too_much_metadata(self):
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
for num in range(FLAGS.quota_metadata_items + 1):
body['createBackup']['metadata']['foo%i' % num] = "bar"
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(413, response.status_int)
def test_create_backup_no_name(self):
"""Name is required for backups"""
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'backup_type': 'daily',
'rotation': 1,
},
}
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_backup_no_rotation(self):
"""Rotation is required for backup requests"""
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
},
}
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_backup_no_backup_type(self):
"""Backup Type (daily or weekly) is required for backup requests"""
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'rotation': 1,
},
}
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_backup_bad_entity(self):
self.flags(allow_admin_api=True)
body = {'createBackup': 'go'}
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
class ServerActionsTestV11(test.TestCase):
def setUp(self):
self.maxDiff = None
super(ServerActionsTestV11, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_auth(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
fakes.stub_out_glance(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(1, None)
self.service.delete_all()
self.sent_to_glance = {}
fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
self.flags(allow_instance_snapshots=True)
def tearDown(self):
self.stubs.UnsetAll()
def test_server_bad_body(self):
body = {}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_unknown_action(self):
body = {'sockTheFox': {'fakekey': '1234'}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(mock_method.instance_id, '1')
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_xml(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = "application/xml"
req.body = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass="1234pass"/>"""
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(mock_method.instance_id, '1')
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_not_a_string(self):
body = {'changePassword': {'adminPass': 1234}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_change_password_empty_string(self):
body = {'changePassword': {'adminPass': ''}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_server_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_server_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_reboot_missing_type(self):
body = dict(reboot=dict())
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_accepted_minimum(self):
new_return_server = return_server_with_attributes(image_ref='2')
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
body = json.loads(res.body)
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']), 16)
def test_server_rebuild_rejected_when_building(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
},
}
state = vm_states.BUILDING
new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_server_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
new_return_server = return_server_with_attributes(metadata=metadata)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
"metadata": metadata,
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
body = json.loads(res.body)
self.assertEqual(body['server']['metadata'], metadata)
def test_server_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
"metadata": "stack",
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": 2,
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
body = json.loads(res.body)
self.assertTrue('personality' not in body['server'])
def test_server_rebuild_admin_pass(self):
new_return_server = return_server_with_attributes(image_ref='2')
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
"adminPass": "asdf",
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
body = json.loads(res.body)
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_server_rebuild_server_not_found(self):
def server_not_found(self, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(nova.db.api, 'instance_get', server_not_found)
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
def test_resize_server(self):
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
body_dict = dict(resize=dict(flavorRef="http://localhost/3"))
req.body = json.dumps(body_dict)
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
body_dict = dict(resize=dict())
req.body = json.dumps(body_dict)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_resize_server_no_flavor_ref(self):
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
body_dict = dict(resize=dict(flavorRef=None))
req.body = json.dumps(body_dict)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_confirm_resize_server(self):
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
body_dict = dict(confirmResize=None)
req.body = json.dumps(body_dict)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(nova.compute.api.API, 'confirm_resize', cr_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
self.assertEqual(self.confirm_resize_called, True)
def test_revert_resize_server(self):
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
body_dict = dict(revertResize=None)
req.body = json.dumps(body_dict)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_mock)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self.revert_resize_called, True)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(202, response.status_int)
location = response.headers['Location']
self.assertEqual('http://localhost/v1.1/images/123', location)
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(202, response.status_int)
location = response.headers['Location']
self.assertEqual('http://localhost/v1.1/images/123', location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(FLAGS.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(413, response.status_int)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_backup(self):
"""The happy path for creating backups"""
self.flags(allow_admin_api=True)
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(202, response.status_int)
self.assertTrue(response.headers['Location'])
class TestServerActionXMLDeserializerV11(test.TestCase):
def setUp(self):
self.deserializer = create_instance_helper.ServerXMLDeserializerV11()
def tearDown(self):
pass
def test_create_image(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
},
}
self.assertEquals(request['body'], expected)
def test_create_image_with_metadata(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="key1">value1</meta>
</metadata>
</createImage>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
"metadata": {"key1": "value1"},
},
}
self.assertEquals(request['body'], expected)
def test_change_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass="1234pass"/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "1234pass",
},
}
self.assertEquals(request['body'], expected)
def test_change_pass_no_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_reboot(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"
type="HARD"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"reboot": {
"type": "HARD",
},
}
self.assertEquals(request['body'], expected)
def test_reboot_no_type(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"
flavorRef="http://localhost/flavors/3"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"resize": {"flavorRef": "http://localhost/flavors/3"},
}
self.assertEquals(request['body'], expected)
def test_resize_no_flavor_ref(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_confirm_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<confirmResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"confirmResize": None,
}
self.assertEquals(request['body'], expected)
def test_revert_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<revertResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"revertResize": None,
}
self.assertEquals(request['body'], expected)
def test_rebuild(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost/images/1">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"name": "new-server-test",
"imageRef": "http://localhost/images/1",
"metadata": {
"My Server Name": "Apache1",
},
"personality": [
{"path": "/etc/banner.txt", "contents": "Mg=="},
],
},
}
self.assertDictMatch(request['body'], expected)
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"imageRef": "http://localhost/images/1",
},
}
self.assertDictMatch(request['body'], expected)
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
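Every test in the file above repeats one WebOb pattern: build a blank Request against a /servers/<id>/action URL, attach a JSON body, and push it through the stubbed WSGI app. Below is a minimal, self-contained sketch of that pattern; the tiny_app function is an invented stand-in for nova's fakes.wsgi_app(), not the real application.

```python
import json

import webob


def tiny_app(environ, start_response):
    # Invented stand-in for fakes.wsgi_app(): unconditionally accepts.
    start_response("202 Accepted", [("Content-Type", "application/json")])
    return [b"{}"]


# Same request construction the tests above use.
req = webob.Request.blank("/v1.0/servers/1/action")
req.method = "POST"
req.content_type = "application/json"
req.body = json.dumps({"reboot": {"type": "HARD"}}).encode()

res = req.get_response(tiny_app)
assert res.status_int == 202
```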
[quality signals: avg_line_length 34.640489, max_line_length 79, alphanum_fraction 0.582145; remaining qsc_* metric columns elided]
hexsha: 6a436aee15f54c8a286f878586c1c3ace81544a5 | size: 156 | ext: py | lang: Python
repo_path: superlists/lists/views.py | licenses: ["Apache-2.0"]
max_stars: Alfawuhn/test-driven-python @ 003f9a95ff8b3dd05f5b857a158781d1631f6d10, 2 stars (2015-02-12T04:25:29.000Z to 2015-02-12T04:25:33.000Z)
max_issues / max_forks: same repo and head; counts and event dates null
content:
from django.http import HttpResponse
# Create your views here.
def home_page(request):
return HttpResponse('<html><title>To-Do lists</title></html>')
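A sketch of how this one-line view could be exercised in isolation with Django's RequestFactory. The inline settings.configure() call and the mirrored view definition are assumptions so the snippet runs outside a full project; the record itself contains only the view.

```python
import django
from django.conf import settings

# Minimal settings so this runs without a project (assumption).
if not settings.configured:
    settings.configure(DEBUG=True, ALLOWED_HOSTS=["*"])
    django.setup()

from django.http import HttpResponse
from django.test import RequestFactory


def home_page(request):
    # Mirror of the view in the record above.
    return HttpResponse('<html><title>To-Do lists</title></html>')


response = home_page(RequestFactory().get("/"))
assert response.status_code == 200
assert b"To-Do lists" in response.content
```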
[quality signals: avg_line_length 22.285714, max_line_length 66, alphanum_fraction 0.737179; remaining qsc_* metric columns elided]
hexsha: e00b802934666522532aa2946ffcc2a8f3b2ae1c | size: 16,929 | ext: py | lang: Python
repo_path: pybind/nos/v7_1_0/sflow/collector/__init__.py | licenses: ["Apache-2.0"]
max_stars / max_issues: shivharis/pybind @ 4e1c6d54b9fd722ccec25546ba2413d79ce337e6; counts and event dates null
max_forks: same repo and head, 1 fork (2021-11-05T22:15:42.000Z to 2021-11-05T22:15:42.000Z)
content:
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class collector(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-sflow - based on the path /sflow/collector. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__collector_ip_address','__collector_port_number','__use_vrf',)
_yang_name = 'collector'
_rest_name = 'collector'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__collector_ip_address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="collector-ip-address", rest_name="collector-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<ipv4/v6 address>; The IPv4/IPv6 address of the Sflow collector', u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='inet:ip-address', is_config=True)
self.__collector_port_number = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="collector-port-number", rest_name="collector-port-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<1-65535> The port number used by the Sflow collector (default = 6343)', u'cli-drop-node-name': None, u'key-default': u'6343', u'cli-optional-in-sequence': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)
self.__use_vrf = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vrf to use for sending data to the collector (default = mgmt-vrf)', u'cli-optional-in-sequence': None, u'key-default': u'mgmt-vrf', u'cli-expose-key-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'sflow', u'collector']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'sflow', u'collector']
def _get_collector_ip_address(self):
"""
Getter method for collector_ip_address, mapped from YANG variable /sflow/collector/collector_ip_address (inet:ip-address)
"""
return self.__collector_ip_address
def _set_collector_ip_address(self, v, load=False):
"""
Setter method for collector_ip_address, mapped from YANG variable /sflow/collector/collector_ip_address (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_ip_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_ip_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="collector-ip-address", rest_name="collector-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<ipv4/v6 address>; The IPv4/IPv6 address of the Sflow collector', u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='inet:ip-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_ip_address must be of a type compatible with inet:ip-address""",
'defined-type': "inet:ip-address",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="collector-ip-address", rest_name="collector-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<ipv4/v6 address>; The IPv4/IPv6 address of the Sflow collector', u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='inet:ip-address', is_config=True)""",
})
self.__collector_ip_address = t
if hasattr(self, '_set'):
self._set()
def _unset_collector_ip_address(self):
self.__collector_ip_address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="collector-ip-address", rest_name="collector-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<ipv4/v6 address>; The IPv4/IPv6 address of the Sflow collector', u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='inet:ip-address', is_config=True)
def _get_collector_port_number(self):
"""
Getter method for collector_port_number, mapped from YANG variable /sflow/collector/collector_port_number (uint32)
"""
return self.__collector_port_number
def _set_collector_port_number(self, v, load=False):
"""
Setter method for collector_port_number, mapped from YANG variable /sflow/collector/collector_port_number (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_port_number is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_port_number() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="collector-port-number", rest_name="collector-port-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<1-65535> The port number used by the Sflow collector (default = 6343)', u'cli-drop-node-name': None, u'key-default': u'6343', u'cli-optional-in-sequence': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_port_number must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="collector-port-number", rest_name="collector-port-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<1-65535> The port number used by the Sflow collector (default = 6343)', u'cli-drop-node-name': None, u'key-default': u'6343', u'cli-optional-in-sequence': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)""",
})
self.__collector_port_number = t
if hasattr(self, '_set'):
self._set()
def _unset_collector_port_number(self):
self.__collector_port_number = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="collector-port-number", rest_name="collector-port-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<1-65535> The port number used by the Sflow collector (default = 6343)', u'cli-drop-node-name': None, u'key-default': u'6343', u'cli-optional-in-sequence': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)
def _get_use_vrf(self):
"""
Getter method for use_vrf, mapped from YANG variable /sflow/collector/use_vrf (common-def:vrf-name)
"""
return self.__use_vrf
def _set_use_vrf(self, v, load=False):
"""
Setter method for use_vrf, mapped from YANG variable /sflow/collector/use_vrf (common-def:vrf-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_use_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_use_vrf() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vrf to use for sending data to the collector (default = mgmt-vrf)', u'cli-optional-in-sequence': None, u'key-default': u'mgmt-vrf', u'cli-expose-key-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """use_vrf must be of a type compatible with common-def:vrf-name""",
'defined-type': "common-def:vrf-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vrf to use for sending data to the collector (default = mgmt-vrf)', u'cli-optional-in-sequence': None, u'key-default': u'mgmt-vrf', u'cli-expose-key-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)""",
})
self.__use_vrf = t
if hasattr(self, '_set'):
self._set()
def _unset_use_vrf(self):
self.__use_vrf = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vrf to use for sending data to the collector (default = mgmt-vrf)', u'cli-optional-in-sequence': None, u'key-default': u'mgmt-vrf', u'cli-expose-key-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)
collector_ip_address = __builtin__.property(_get_collector_ip_address, _set_collector_ip_address)
collector_port_number = __builtin__.property(_get_collector_port_number, _set_collector_port_number)
use_vrf = __builtin__.property(_get_use_vrf, _set_use_vrf)
_pyangbind_elements = {'collector_ip_address': collector_ip_address, 'collector_port_number': collector_port_number, 'use_vrf': use_vrf, }
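At the bottom of the generated module, each private _get_*/_set_* pair is wired into a property. Below is a dependency-free sketch of that same pattern in modern Python; the YANG type machinery is elided, keeping only the 1..65535 range and the 6343 key-default visible in the record's extension strings.

```python
class Collector:
    """Hand-written illustration of the pyangbind getter/setter pattern."""

    def __init__(self):
        self.__port = 6343  # key-default from the YANG extensions above

    def _get_collector_port_number(self):
        return self.__port

    def _set_collector_port_number(self, v):
        # Stands in for the RestrictedClassType range check ('1..65535').
        if not 1 <= int(v) <= 65535:
            raise ValueError("collector_port_number must be in 1..65535")
        self.__port = int(v)

    collector_port_number = property(_get_collector_port_number,
                                     _set_collector_port_number)


c = Collector()
c.collector_port_number = 2055
assert c.collector_port_number == 2055
```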
[quality signals: avg_line_length 81.389423, max_line_length 984, alphanum_fraction 0.689999; remaining qsc_* metric columns elided]
hexsha: e02bb86f60c9827be28be736f84f040de29c2e47 | size: 74 | ext: py | lang: Python
repo_path: notebooks/solutions/indexing_01.py | licenses: ["CC-BY-4.0"]
max_stars: eseiver/Pandas-Tutorial-SciPyConf-2018 @ b6df2be699ea7ca12e2e6a7c7bde12bdc3565d62, 43 stars (2018-07-10T18:52:44.000Z to 2021-05-04T21:26:49.000Z)
max_issues: piyushpathak03/Pandas-Tutorial-SciPyConf-2018 @ fc68001e0a9346d2b9f30a31d0a66d10dde35114, 8 issues (2018-06-17T21:47:27.000Z to 2018-07-11T22:31:17.000Z)
max_forks: piyushpathak03/Pandas-Tutorial-SciPyConf-2018 @ fc68001e0a9346d2b9f30a31d0a66d10dde35114, 47 forks (2018-07-06T15:07:23.000Z to 2020-11-07T07:44:20.000Z)
content:
flights[(flights.dep.dt.hour <= 6) |
(flights.dep.dt.hour >= 18)]
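This solution keeps flights departing before 06:00 or after 18:00. The flights frame itself is not part of the record, so here is the same boolean-mask idiom on an invented stand-in with a datetime dep column:

```python
import pandas as pd

# Invented stand-in for the tutorial's flights frame.
flights = pd.DataFrame({
    "dep": pd.to_datetime([
        "2018-01-01 05:30",  # early morning -> kept
        "2018-01-01 12:15",  # midday        -> dropped
        "2018-01-01 19:45",  # evening       -> kept
    ]),
})

off_peak = flights[(flights.dep.dt.hour <= 6) |
                   (flights.dep.dt.hour >= 18)]
print(off_peak)
```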
[quality signals: avg_line_length 24.666667, max_line_length 36, alphanum_fraction 0.567568; remaining qsc_* metric columns elided]
hexsha: 0edfb98f8ae5da44620bae4ad4fb6b7054a9c651 | size: 25 | ext: py | lang: Python
repo_path: 05. WINDOWS Python Setup/exe 1.py | licenses: ["MIT"]
max_stars: AnmolTomer/Udemy---Colt-Steele-Modern-Python-Bootcamp-Codebook @ 5073fd92e38d95a1b7ecf3b9effb9c9683ce5ceb, 3 stars (2020-06-17T10:05:37.000Z to 2021-12-14T17:24:21.000Z)
max_issues: same repo and head; count and event dates null
max_forks: same repo and head, 4 forks (2019-02-28T17:15:46.000Z to 2020-04-26T05:56:57.000Z)
content:
print("Cosmic Commander")
[quality signals: avg_line_length 25, max_line_length 25, alphanum_fraction 0.8; remaining qsc_* metric columns elided]
hexsha: 1614955d94b7fae0ed23df30bbf8c52749e926a2 | size: 139 | ext: py | lang: Python
repo_path: amqpstorm/tests/__init__.py | licenses: ["MIT"]
max_stars / max_issues / max_forks: mikemrm/amqpstorm @ 2a4ec4d72a81498e0774deda338f6aaf16570881; all counts and event dates null
content:
HOST = '127.0.0.1'
USERNAME = 'guest'
PASSWORD = 'guest'
URI = 'amqp://guest:guest@127.0.0.1:5672/%2F'
HTTP_URL = 'http://127.0.0.1:15672'
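These constants read as fixtures for an integration test against a local RabbitMQ broker. A hedged sketch of how a test might consume them follows; it assumes amqpstorm's Connection(hostname, username, password) signature and requires a broker actually listening at HOST:5672.

```python
import amqpstorm

HOST = '127.0.0.1'
USERNAME = 'guest'
PASSWORD = 'guest'

# Requires a live RabbitMQ broker at HOST:5672; the Connection signature
# here is an assumption based on amqpstorm's documented API.
connection = amqpstorm.Connection(HOST, USERNAME, PASSWORD)
channel = connection.channel()
channel.queue.declare('test_queue')
channel.basic.publish(body='hello', routing_key='test_queue')
channel.close()
connection.close()
```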
[quality signals: avg_line_length 23.166667, max_line_length 45, alphanum_fraction 0.633094; remaining qsc_* metric columns elided]
1619933d1891b8ee8970bf6c27f0b566306db31e
| 69
|
py
|
Python
|
cspark/EventTypeRouter.py
|
Matvey-Kuk/spark-python
|
69b8d8c708fd032077dcccb01a8466705b33c4a7
|
[
"MIT"
] | null | null | null |
cspark/EventTypeRouter.py
|
Matvey-Kuk/spark-python
|
69b8d8c708fd032077dcccb01a8466705b33c4a7
|
[
"MIT"
] | 102
|
2017-01-30T05:50:10.000Z
|
2022-03-07T18:56:23.000Z
|
cspark/EventTypeRouter.py
|
Matvey-Kuk/cspark-python
|
69b8d8c708fd032077dcccb01a8466705b33c4a7
|
[
"MIT"
] | null | null | null |
from .Router import Router
class EventTypeRouter(Router):
    pass
| 11.5
| 30
| 0.753623
| 8
| 69
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188406
| 69
| 5
| 31
| 13.8
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
165d499eb79bd13ee46ac0f3cbfaa8f76e0dd5d0
| 25
|
py
|
Python
|
game/blenderpanda/__init__.py
|
Kupoman/fafnir-demo
|
1e285296b49f00fa99672a242c8bfc4afd696ff4
|
[
"MIT"
] | 1
|
2017-05-29T23:03:13.000Z
|
2017-05-29T23:03:13.000Z
|
game/blenderpanda/__init__.py
|
Kupoman/fafnir-demo
|
1e285296b49f00fa99672a242c8bfc4afd696ff4
|
[
"MIT"
] | null | null | null |
game/blenderpanda/__init__.py
|
Kupoman/fafnir-demo
|
1e285296b49f00fa99672a242c8bfc4afd696ff4
|
[
"MIT"
] | null | null | null |
from .bpbase import init
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1682ffb1dea352c7f0a2a0679fdd5833491998b9
| 11,227
|
py
|
Python
|
tests/test_api/test_pep8.py
|
Joshua-Enrico/AirBnB_clone_v4
|
c603b7907826b60584597b258f42175d7c6bdd1a
|
[
"MIT"
] | null | null | null |
tests/test_api/test_pep8.py
|
Joshua-Enrico/AirBnB_clone_v4
|
c603b7907826b60584597b258f42175d7c6bdd1a
|
[
"MIT"
] | null | null | null |
tests/test_api/test_pep8.py
|
Joshua-Enrico/AirBnB_clone_v4
|
c603b7907826b60584597b258f42175d7c6bdd1a
|
[
"MIT"
] | 1
|
2021-10-04T19:29:47.000Z
|
2021-10-04T19:29:47.000Z
|
#!/usr/bin/python3
"""
Contains the TestStateDocs classes
"""
from datetime import datetime
import inspect
import models
import os
from models import state
from models.base_model import BaseModel
import pep8
import unittest
from api.v1 import app
from api.v1.views import states as test_state
from api.v1.views import amenities
from api.v1.views import cities
from api.v1.views import index
from api.v1.views import places_reviews
from api.v1.views import places_amenities
from api.v1.views import places
from api.v1.views import users
State = state.State
class TestStateDocs(unittest.TestCase):
"""Tests to check the documentation for all api files"""
@classmethod
def setUpClass(cls):
"""Set up for the doc tests"""
cls.state_f = inspect.getmembers(State, inspect.isfunction)
def test_pep8_conformance_app(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/app.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_states(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/states.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_amenities(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/amenities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_cities(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/cities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places_rev(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places_reviews.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_users(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/users.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_index(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/index.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_test_pep8(self):
"""Test that tests/test_models/test_state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['tests/test_api/test_pep8.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places_amenities(self):
"""Test that tests/test_models/test_state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places_amenities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
# file docstring
def test_state_module_docstring_app(self):
"""Test for the state.py module docstring"""
self.assertIsNot(app.__doc__, None,
"state.py needs a docstring")
self.assertTrue(len(app.__doc__) >= 1,
"state.py needs a docstring")
def test_state_class_docstring_state(self):
"""Test for the State class docstring"""
self.assertIsNot(test_state.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(test_state.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_amenities(self):
"""Test for the State class docstring"""
self.assertIsNot(amenities.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(amenities.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_cities(self):
"""Test for the State class docstring"""
self.assertIsNot(cities.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(cities.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_index(self):
"""Test for the State class docstring"""
self.assertIsNot(index.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(index.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_rev(self):
"""Test for the State class docstring"""
self.assertIsNot(places_reviews.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(places_reviews.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_places(self):
"""Test for the State class docstring"""
self.assertIsNot(places.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(places.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_users(self):
"""Test for the State class docstring"""
self.assertIsNot(users.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(users.__doc__) >= 1,
"State class needs a docstring")
def test_state_class_docstring_amenities_rev(self):
"""Test for the State class docstring"""
self.assertIsNot(places_amenities.__doc__, None,
"State class needs a docstring")
self.assertTrue(len(users.__doc__) >= 1,
"State class needs a docstring")
# docstring tests
def test_state_func_docstrings(self):
"""Test for the presence of docstrings in State methods"""
for func in self.state_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_index(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(index, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_user(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(users, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_places(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_states(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(test_state, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_place_rev(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places_reviews, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_amenity_rev(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places_amenities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_cities(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(cities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_amenities(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(amenities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
| 45.088353
| 77
| 0.610849
| 1,362
| 11,227
| 4.80837
| 0.069016
| 0.032982
| 0.082455
| 0.038479
| 0.87647
| 0.848679
| 0.82608
| 0.82608
| 0.81982
| 0.783173
| 0
| 0.016867
| 0.281821
| 11,227
| 248
| 78
| 45.270161
| 0.795362
| 0.127104
| 0
| 0.52514
| 0
| 0
| 0.17251
| 0.023029
| 0
| 0
| 0
| 0
| 0.256983
| 1
| 0.162011
| false
| 0
| 0.094972
| 0
| 0.26257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16878f631cd695277870d207fede00a5db727143
| 27
|
py
|
Python
|
tests/sample_CASE.py
|
thomastan/pyexe
|
944a5e09ed2db4b9b7633bb1de77ad3eb777d958
|
[
"Apache-2.0"
] | 47
|
2018-04-13T02:41:48.000Z
|
2022-02-07T15:55:33.000Z
|
tests/sample_CASE.py
|
thomastan/pyexe
|
944a5e09ed2db4b9b7633bb1de77ad3eb777d958
|
[
"Apache-2.0"
] | 17
|
2018-04-09T03:12:43.000Z
|
2021-09-07T06:46:59.000Z
|
tests/sample_CASE.py
|
thomastan/pyexe
|
944a5e09ed2db4b9b7633bb1de77ad3eb777d958
|
[
"Apache-2.0"
] | 11
|
2018-05-31T05:49:52.000Z
|
2021-12-17T06:20:12.000Z
|
print('mixed CASE module')
| 13.5
| 26
| 0.740741
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
16921897190e63fed79fd3e90bd19da13692370c
| 145
|
py
|
Python
|
wagtail/wagtailcore/signals.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2021-09-21T00:06:52.000Z
|
2021-09-21T00:06:52.000Z
|
wagtail/wagtailcore/signals.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T08:25:30.000Z
|
2021-02-24T08:25:30.000Z
|
wagtail/wagtailcore/signals.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2020-11-24T10:21:24.000Z
|
2020-11-24T10:21:24.000Z
|
from django.dispatch import Signal
page_published = Signal(providing_args=['instance'])
page_unpublished = Signal(providing_args=['instance'])
| 24.166667
| 54
| 0.8
| 17
| 145
| 6.588235
| 0.647059
| 0.267857
| 0.339286
| 0.482143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082759
| 145
| 5
| 55
| 29
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
169be9adaaf883f774b866e1d842ce3657d29c32
| 29,902
|
py
|
Python
|
pypower/case118.py
|
Bengt/PYPOWER
|
78a0f8d4765d147f8237e9a905ef871508ecfee7
|
[
"BSD-3-Clause"
] | 221
|
2015-01-03T23:18:11.000Z
|
2022-03-27T10:21:40.000Z
|
pypower/case118.py
|
Bengt/PYPOWER
|
78a0f8d4765d147f8237e9a905ef871508ecfee7
|
[
"BSD-3-Clause"
] | 33
|
2015-05-12T08:48:02.000Z
|
2021-11-23T10:35:21.000Z
|
pypower/case118.py
|
Bengt/PYPOWER
|
78a0f8d4765d147f8237e9a905ef871508ecfee7
|
[
"BSD-3-Clause"
] | 114
|
2015-02-02T15:07:38.000Z
|
2022-03-22T17:01:55.000Z
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for IEEE 118 bus test case.
"""
from numpy import array
def case118():
"""Power flow data for IEEE 118 bus test case.
Please see L{caseformat} for details on the case file format.
This data was converted from IEEE Common Data Format
(ieee118cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
See end of file for warnings generated during conversion.
Converted from IEEE CDF file from:
U{http://www.ee.washington.edu/research/pstca/}
With baseKV data taken from the PSAP format file from the same site,
added manually on 10-Mar-2006.
08/25/93 UW ARCHIVE 100.0 1961 W IEEE 118 Bus Test Case
@return: Power flow data for IEEE 118 bus test case.
"""
ppc = {"version": '2'}
##----- Power Flow Data -----##
## system MVA base
ppc["baseMVA"] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc["bus"] = array([
[1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94],
[2, 1, 20, 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94],
[3, 1, 39, 10, 0, 0, 1, 0.968, 11.56, 138, 1, 1.06, 0.94],
[4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, 1, 1.06, 0.94],
[5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, 0.94],
[6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94],
[7, 1, 19, 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94],
[8, 2, 28, 0, 0, 0, 1, 1.015, 20.77, 345, 1, 1.06, 0.94],
[9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, 345, 1, 1.06, 0.94],
[10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, 0.94],
[11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94],
[12, 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94],
[13, 1, 34, 16, 0, 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94],
[14, 1, 14, 1, 0, 0, 1, 0.984, 11.5, 138, 1, 1.06, 0.94],
[15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1, 1.06, 0.94],
[16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],
[17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94],
[18, 2, 60, 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94],
[19, 2, 45, 25, 0, 0, 1, 0.963, 11.05, 138, 1, 1.06, 0.94],
[20, 1, 18, 3, 0, 0, 1, 0.958, 11.93, 138, 1, 1.06, 0.94],
[21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06, 0.94],
[22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94],
[23, 1, 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94],
[24, 2, 13, 0, 0, 0, 1, 0.992, 20.89, 138, 1, 1.06, 0.94],
[25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, 138, 1, 1.06, 0.94],
[26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06, 0.94],
[27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94],
[28, 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94],
[29, 1, 24, 4, 0, 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94],
[30, 1, 0, 0, 0, 0, 1, 0.968, 18.79, 345, 1, 1.06, 0.94],
[31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138, 1, 1.06, 0.94],
[32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, 0.94],
[33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94],
[34, 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94],
[35, 1, 33, 9, 0, 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94],
[36, 2, 31, 17, 0, 0, 1, 0.98, 10.87, 138, 1, 1.06, 0.94],
[37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138, 1, 1.06, 0.94],
[38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94],
[39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94],
[40, 2, 66, 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94],
[41, 1, 37, 10, 0, 0, 1, 0.967, 6.92, 138, 1, 1.06, 0.94],
[42, 2, 96, 23, 0, 0, 1, 0.985, 8.53, 138, 1, 1.06, 0.94],
[43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06, 0.94],
[44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94],
[45, 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94],
[46, 2, 28, 10, 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94],
[47, 1, 34, 0, 0, 0, 1, 1.017, 20.73, 138, 1, 1.06, 0.94],
[48, 1, 20, 11, 0, 15, 1, 1.021, 19.93, 138, 1, 1.06, 0.94],
[49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138, 1, 1.06, 0.94],
[50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94],
[51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94],
[52, 1, 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94],
[53, 1, 23, 11, 0, 0, 1, 0.946, 14.35, 138, 1, 1.06, 0.94],
[54, 2, 113, 32, 0, 0, 1, 0.955, 15.26, 138, 1, 1.06, 0.94],
[55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138, 1, 1.06, 0.94],
[56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, 0.94],
[57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94],
[58, 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94],
[59, 2, 277, 113, 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94],
[60, 1, 78, 3, 0, 0, 1, 0.993, 23.15, 138, 1, 1.06, 0.94],
[61, 2, 0, 0, 0, 0, 1, 0.995, 24.04, 138, 1, 1.06, 0.94],
[62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, 1.06, 0.94],
[63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],
[64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94],
[65, 2, 0, 0, 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94],
[66, 2, 39, 18, 0, 0, 1, 1.05, 27.48, 138, 1, 1.06, 0.94],
[67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, 138, 1, 1.06, 0.94],
[68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06, 0.94],
[69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94],
[70, 2, 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94],
[71, 1, 0, 0, 0, 0, 1, 0.987, 22.15, 138, 1, 1.06, 0.94],
[72, 2, 12, 0, 0, 0, 1, 0.98, 20.98, 138, 1, 1.06, 0.94],
[73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1, 1.06, 0.94],
[74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94],
[75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94],
[76, 2, 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94],
[77, 2, 61, 28, 0, 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94],
[78, 1, 71, 26, 0, 0, 1, 1.003, 26.42, 138, 1, 1.06, 0.94],
[79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138, 1, 1.06, 0.94],
[80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, 0.94],
[81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94],
[82, 1, 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94],
[83, 1, 20, 10, 0, 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94],
[84, 1, 11, 7, 0, 0, 1, 0.98, 30.95, 138, 1, 1.06, 0.94],
[85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138, 1, 1.06, 0.94],
[86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, 0.94],
[87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94],
[88, 1, 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94],
[89, 2, 0, 0, 0, 0, 1, 1.005, 39.69, 138, 1, 1.06, 0.94],
[90, 2, 163, 42, 0, 0, 1, 0.985, 33.29, 138, 1, 1.06, 0.94],
[91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1, 1.06, 0.94],
[92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],
[93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94],
[94, 1, 30, 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94],
[95, 1, 42, 31, 0, 0, 1, 0.981, 27.67, 138, 1, 1.06, 0.94],
[96, 1, 38, 15, 0, 0, 1, 0.993, 27.51, 138, 1, 1.06, 0.94],
[97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, 1, 1.06, 0.94],
[98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94],
[99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94],
[100, 2, 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94],
[101, 1, 22, 15, 0, 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94],
[102, 1, 5, 3, 0, 0, 1, 0.991, 32.3, 138, 1, 1.06, 0.94],
[103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138, 1, 1.06, 0.94],
[104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, 0.94],
[105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94],
[106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94],
[107, 2, 50, 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94],
[108, 1, 2, 1, 0, 0, 1, 0.967, 19.38, 138, 1, 1.06, 0.94],
[109, 1, 8, 3, 0, 0, 1, 0.967, 18.93, 138, 1, 1.06, 0.94],
[110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, 1.06, 0.94],
[111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],
[112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94],
[113, 2, 6, 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94],
[114, 1, 8, 3, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[115, 1, 22, 7, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, 1.06, 0.94],
[117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],
[118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc["gen"] = array([
[1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[18, 0, 0, 50, -16, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[19, 0, 0, 24, -8, 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[24, 0, 0, 300, -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[25, 220, 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[40, 0, 0, 300, -300, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[42, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[46, 19, 0, 100, -100, 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49, 204, 0, 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[54, 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[65, 391, 0, 200, -67, 1.005, 100, 1, 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[66, 392, 0, 200, -67, 1.05, 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[69, 516.4, 0, 300, -300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[70, 0, 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[87, 4, 0, 1000, -100, 1.015, 100, 1, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[89, 607, 0, 300, -210, 1.005, 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[90, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[91, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[92, 0, 0, 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[99, 0, 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[110, 0, 0, 23, -8, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[111, 36, 0, 1000, -100, 0.98, 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[112, 0, 0, 1000, -100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[113, 0, 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
ppc["branch"] = array([
[1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 5, 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 5, 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 6, 0.0119, 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360],
[6, 7, 0.00459, 0.0208, 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 9, 0.00244, 0.0305, 1.162, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360],
[2, 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 12, 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[7, 12, 0.00862, 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 13, 0.02225, 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 14, 0.0215, 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360],
[13, 15, 0.0744, 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360],
[14, 15, 0.0595, 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 16, 0.0212, 0.0834, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 17, 0.0132, 0.0437, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[16, 17, 0.0454, 0.1801, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 18, 0.0123, 0.0505, 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360],
[18, 19, 0.01119, 0.0493, 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 20, 0.0252, 0.117, 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 19, 0.012, 0.0394, 0.0101, 9900, 0, 0, 0, 0, 1, -360, 360],
[20, 21, 0.0183, 0.0849, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, 0, 0, 1, -360, 360],
[22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[25, 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 28, 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[28, 29, 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 17, 0, 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[8, 30, 0.00431, 0.0504, 0.514, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 30, 0.00799, 0.086, 0.908, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 31, 0.0474, 0.1563, 0.0399, 9900, 0, 0, 0, 0, 1, -360, 360],
[29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0, 1, -360, 360],
[31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 37, 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 37, 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 36, 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 37, 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 37, 0, 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[37, 39, 0.0321, 0.106, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 40, 0.0593, 0.168, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 38, 0.00464, 0.054, 0.422, 9900, 0, 0, 0, 0, 1, -360, 360],
[39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360],
[44, 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 46, 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 47, 0.038, 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 48, 0.0601, 0.189, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 49, 0.0191, 0.0625, 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360],
[52, 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360],
[53, 54, 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
[61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
[63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
[79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
[84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
[90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
[95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
[96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
[101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
[106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[108, 109, 0.0105, 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 110, 0.03906, 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360],
[109, 110, 0.0278, 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 111, 0.022, 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 112, 0.0247, 0.064, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 113, 0.00913, 0.0301, 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 113, 0.0615, 0.203, 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 114, 0.0135, 0.0612, 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 115, 0.0164, 0.0741, 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360],
[114, 115, 0.0023, 0.0104, 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 116, 0.00034, 0.00405, 0.164, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 117, 0.0329, 0.14, 0.0358, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 118, 0.0145, 0.0481, 0.01198, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 118, 0.0164, 0.0544, 0.01356, 9900, 0, 0, 0, 0, 1, -360, 360]
])
##----- OPF Data -----##
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
ppc["gencost"] = array([
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0222222, 20, 0],
[2, 0, 0, 3, 0.117647, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0454545, 20, 0],
[2, 0, 0, 3, 0.0318471, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 1.42857, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.526316, 20, 0],
[2, 0, 0, 3, 0.0490196, 20, 0],
[2, 0, 0, 3, 0.208333, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0645161, 20, 0],
[2, 0, 0, 3, 0.0625, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0255754, 20, 0],
[2, 0, 0, 3, 0.0255102, 20, 0],
[2, 0, 0, 3, 0.0193648, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0209644, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 2.5, 20, 0],
[2, 0, 0, 3, 0.0164745, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0396825, 20, 0],
[2, 0, 0, 3, 0.25, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.277778, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0]
])
return ppc
| 63.621277
| 92
| 0.430239
| 6,846
| 29,902
| 1.878469
| 0.116857
| 0.216485
| 0.221151
| 0.211509
| 0.562519
| 0.515241
| 0.462364
| 0.399067
| 0.343857
| 0.208087
| 0
| 0.575263
| 0.30727
| 29,902
| 469
| 93
| 63.75693
| 0.045573
| 0.041502
| 0
| 0.098824
| 0
| 0
| 0.00119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002353
| false
| 0
| 0.002353
| 0
| 0.007059
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16aeb1434c61ec5f57f1e2b5b5461f487cd35aa7
| 119
|
py
|
Python
|
src/flowket/deepar/ordering/raster.py
|
vigsterkr/FlowKet
|
0d8f301b5f51a1bab83021f10f65cfb5f2751079
|
[
"MIT"
] | 21
|
2019-11-19T13:59:13.000Z
|
2021-12-03T10:26:30.000Z
|
src/flowket/deepar/ordering/raster.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 10
|
2019-11-15T12:07:28.000Z
|
2020-11-07T18:12:18.000Z
|
src/flowket/deepar/ordering/raster.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 11
|
2019-12-09T22:51:17.000Z
|
2021-11-29T22:05:41.000Z
|
import itertools
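# iterate over the index tuples of an n-dimensional grid in raster (row-major) order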
def raster(input_size):
    return itertools.product(*[range(dim_size) for dim_size in input_size])
| 19.833333
| 75
| 0.773109
| 18
| 119
| 4.888889
| 0.666667
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 5
| 76
| 23.8
| 0.854369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
bc4a387faf3f2a1bff14de0ff7a001c24ef769b7
| 29
|
py
|
Python
|
kats/graphics/__init__.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | 1
|
2021-06-22T03:40:33.000Z
|
2021-06-22T03:40:33.000Z
|
kats/graphics/__init__.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | null | null | null |
kats/graphics/__init__.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | null | null | null |
from . import plots # noqa
| 14.5
| 28
| 0.655172
| 4
| 29
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 29
| 1
| 29
| 29
| 0.904762
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc8a6e32b83baae0018d70f7cf794bd1abd1040b
| 24
|
py
|
Python
|
src/puLearning/__init__.py
|
hhelm10/pu-learning
|
eea5097192dbf384832b857d3e062ab2482fd1ae
|
[
"BSD-3-Clause"
] | null | null | null |
src/puLearning/__init__.py
|
hhelm10/pu-learning
|
eea5097192dbf384832b857d3e062ab2482fd1ae
|
[
"BSD-3-Clause"
] | null | null | null |
src/puLearning/__init__.py
|
hhelm10/pu-learning
|
eea5097192dbf384832b857d3e062ab2482fd1ae
|
[
"BSD-3-Clause"
] | null | null | null |
from .puAdapter import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc8abc09da8a75b66d974b4d2a9fe4e5ad26321f
| 110
|
py
|
Python
|
katas/kyu_7/sum_up_the_random_string.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_7/sum_up_the_random_string.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_7/sum_up_the_random_string.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
from re import findall
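# sum every maximal run of digits found in the string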
def sum_from_string(string):
    return sum(int(a) for a in findall(r'\d+', string))
| 18.333333
| 55
| 0.7
| 20
| 110
| 3.75
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172727
| 110
| 5
| 56
| 22
| 0.824176
| 0
| 0
| 0
| 0
| 0
| 0.027273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
bc98ba8ecc426b87020638640c47d9c4415cf7c1
| 116
|
py
|
Python
|
models/__init__.py
|
RyanWangZf/NRE-IF
|
738126d3ea06b396c67417e684400f510405f319
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
RyanWangZf/NRE-IF
|
738126d3ea06b396c67417e684400f510405f319
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
RyanWangZf/NRE-IF
|
738126d3ea06b396c67417e684400f510405f319
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .PCNN_ONE import PCNN_ONE
from .PCNN_ATT import PCNN_ATT
from .PCNN_IF import PCNN_IF
| 19.333333
| 30
| 0.741379
| 21
| 116
| 3.809524
| 0.428571
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 0.155172
| 116
| 5
| 31
| 23.2
| 0.806122
| 0.181034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bcce8112a89725621d6bb584a8fded873d43b10f
| 29,448
|
py
|
Python
|
Outputs_win_app.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | null | null | null |
Outputs_win_app.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | null | null | null |
Outputs_win_app.py
|
stfbnc/mtsa_py
|
0dd14f0e51e3251f10b3da781867fbc7173608eb
|
[
"MIT"
] | null | null | null |
import sys
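# Tkinter and the file-dialog module were renamed between Python 2 and Python 3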
if sys.version_info[0] == 2:
import Tkinter as tk
from tkFileDialog import askdirectory
else:
import tkinter as tk
from tkinter.filedialog import askdirectory
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
### set size for figures
x_size = 7
y_size = 5
#############################################################################
############################## SAVE_FREQ ####################################
#############################################################################
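# write each filter round's frequency, period, and percentage to freq_perc.txt in a directory chosen via a dialog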
def save_freq(arg0,arg2,arg3,arg4):
path_file = askdirectory()
path_file = os.path.join(path_file,'freq_perc.txt')
f = open(path_file,'w')
f.write('Frequencies for file %s\n\n' % (arg0))
f.write('Filter_round Frequency Period Percentage\n\n')
for i in range(len(arg2)):
f.write('%d %.8f %.2f %.2f\n' % (arg2[i],1.0 / arg3[i],arg3[i],arg4[i]))
f.close()
#############################################################################
#############################################################################
############################## DETREND_PLOT #################################
#############################################################################
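# interactive window plotting the original, trend, and detrended series with editable axis limits, labels, and title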
def detrend_plot(main_win,arg0,arg1,arg2):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg2,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
plt.plot(t,arg1,'k',label = 'trend')
plt.plot(t,arg0,'b',label = 'detrended')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.xticks(ticks,time_label)
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.savefig(os.path.join(path_tot,'ts.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
a.plot(t,arg1,'k',label = 'trend')
a.plot(t,arg0,'b',label = 'detrended')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get(),fontsize = 15)
a.set_ylabel(entries[5].get(),fontsize = 15)
a.set_title(entries[6].get(),fontsize = 15)
a.set_xticks(ticks_vec)
a.set_xticklabels(time_label)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Time Series")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","X Label","Y Label","Title"]
if not np.isscalar(arg1):
values = [t[0],t[-1],np.min([np.min(arg0[~np.isnan(arg0)]),np.min(arg1[~np.isnan(arg1)]),
np.min(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) - 1.0,
np.max([np.max(arg0[~np.isnan(arg0)]),np.max(arg1[~np.isnan(arg1)]),
np.max(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) + 1.0,'t','$X_t$',
'Time Series']
else:
values = [t[0],t[-1],np.min(arg0[~np.isnan(arg0)]) - 1.0,np.max(arg0[~np.isnan(arg0)]) + 1.0,
't','$X_t$','Time Series']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## SPECTRUM_PLOT ################################
#############################################################################
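# interactive window plotting the LS spectrum; when arg2 != 0 a horizontal red line marks that level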
def spectrum_plot(main_win,arg0,arg1,arg2):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(arg1,arg0,'b')
if arg2 != 0:
plt.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
plt.xlabel(entries[0].get())
plt.ylabel(entries[1].get())
plt.xlim(float(entries[2].get()),float(entries[3].get()))
plt.ylim(float(entries[4].get()),float(entries[5].get()))
plt.title(entries[6].get())
plt.savefig(os.path.join(path_tot,'spectrum_in.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(arg1,arg0,'b')
if arg2 != 0:
a.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
a.set_xlabel(entries[0].get(),fontsize = 15)
a.set_ylabel(entries[1].get(),fontsize = 15)
a.set_xlim(float(entries[2].get()),float(entries[3].get()))
a.set_ylim(float(entries[4].get()),float(entries[5].get()))
a.set_title(entries[6].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
if arg2 != 0:
top.wm_title("Spectrum")
else:
top.wm_title("Spectrum of residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Label","Y Label","X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","Title"]
if arg2 != 0:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum (initial)']
else:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum of residuals']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## RES_PLOT #####################################
#############################################################################
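# interactive window plotting residuals and normalised residuals; values beyond +/-3 are counted as outliers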
def res_plot(main_win,arg0,arg1,arg2,arg3):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg3,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
pn_norm_notnan = arg2[~np.isnan(arg2)]
outlier_lim = 3.0
num_outliers_max = len(pn_norm_notnan[pn_norm_notnan > outlier_lim])
num_outliers_min = len(pn_norm_notnan[pn_norm_notnan < -outlier_lim])
num_outliers = num_outliers_max + num_outliers_min
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (12,9))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,1,1)
plt.plot(t,arg0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[5].get()),float(entries[6].get()))
plt.xticks(ticks,'')
plt.ylabel(entries[2].get())
plt.title(entries[4].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.subplot(2,1,2)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) == 2:
plt.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
plt.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
plt.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\\sigma$ = ' + sigma)
plt.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
plt.legend(loc = 0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[7].get()),float(entries[8].get()))
plt.xticks(ticks,time_label)
plt.ylabel(entries[3].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.savefig(os.path.join(path_tot,'res.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(211)
a.plot(t,arg0)
a.set_xlim(int(entries[0].get()),int(entries[1].get()))
a.set_ylim(float(entries[5].get()),float(entries[6].get()))
a.set_xticks(ticks_vec)
a.set_xticklabels('')
a.set_ylabel(entries[2].get(),fontsize = 15)
a.set_title(entries[4].get(),fontsize = 15)
b = fig_ts.add_subplot(212)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) >= 2:
b.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
b.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
b.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\\sigma$ = ' + sigma)
b.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
b.legend(loc = 0)
b.set_xlim(int(entries[0].get()),int(entries[1].get()))
b.set_ylim(float(entries[7].get()),float(entries[8].get()))
b.set_xticks(ticks_vec)
b.set_xticklabels(time_label)
b.set_ylabel(entries[3].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Label (top)","Y Label (bottom)","Title",
"Y1 Limit (bottom)","Y1 Limit (top)","Y2 Limit (bottom)","Y2 Limit (top)"]
values = [t[0],t[-1],'$N_t$','$N_t^{norm}$','Residuals / Normalised residuals',np.min(arg0[~np.isnan(arg0)]) - 10.0,
np.max(arg0[~np.isnan(arg0)]) + 10.0,np.min(arg2[~np.isnan(arg2)]) - 1.0,np.max(arg2[~np.isnan(arg2)]) + 1.0]
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## DFA_PLOT #####################################
#############################################################################
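# Argument sketch for dfa_plot (inferred from use): arg0 -> window sizes n,
# arg1 -> fluctuation function F(n) (both drawn on log scales), arg2 -> the
# fitted line already evaluated at log(n), and arg3 -> the Hurst exponent H as
# a *string*, since it is concatenated directly into the legend label, e.g.
#   dfa_plot(root, n, F_n, fit_line, '%.2f' % H)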
def dfa_plot(main_win,arg0,arg1,arg2,arg3):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(np.log(arg0),np.log(arg1),'o',label = '$H$ = ' + arg3)
plt.plot(np.log(arg0),arg2,'r')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.savefig(os.path.join(path_tot,'dfa.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(np.log(arg0),np.log(arg1),'o',label = '$H$ = ' + arg3)
a.plot(np.log(arg0),arg2,'r')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get())
a.set_ylabel(entries[5].get())
a.set_title(entries[6].get())
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("DFA")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","X Label","Y Label","Title"]
values = [np.log(arg0[0]) - 0.3,np.log(arg0[-1]) + 0.3,np.min(np.log(arg1)) - 1.0,np.max(np.log(arg1)) + 1.0,
'log$(n)$','log$(F(n))$','DFA fit']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## MDFA_PLOT ####################################
#############################################################################
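# Argument sketch for mdfa_plot (inferred from use): arg0 -> window sizes n;
# arg1 -> F(n) per q value (2-D, q along axis 0; rows 0, 50 and -1 are labelled
# q = -3, 0, 3, which suggests a grid of 101 q values); arg2 -> the fits, with
# q along axis 1; arg3/arg4 -> the q grid and h(q); arg5 -> the global H;
# arg6/arg7 -> the singularity spectrum coordinates alpha and f(alpha).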
def mdfa_plot(main_win,arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7):
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (11,11))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,2,1)
plt.plot(np.log(arg0),np.log(arg1[0,:]),'b.')
plt.plot(np.log(arg0),arg2[:,0],'b',label = 'q = -3')
plt.plot(np.log(arg0),np.log(arg1[50,:]),'r.')
plt.plot(np.log(arg0),arg2[:,50],'r',label = 'q = 0')
plt.plot(np.log(arg0),np.log(arg1[-1,:]),'g.')
plt.plot(np.log(arg0),arg2[:,-1],'g',label = 'q = 3')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.subplot(2,2,2)
plt.plot(arg3,arg4,'b',label = 'H(q)')
plt.plot((arg3[0],arg3[-1]),(arg5,arg5),'k',label = 'H')
plt.legend(loc = 0)
plt.xlim(float(entries[7].get()),float(entries[8].get()))
plt.ylim(float(entries[9].get()),float(entries[10].get()))
plt.xlabel(entries[11].get())
plt.ylabel(entries[12].get())
plt.title(entries[13].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.subplot(2,2,3)
plt.plot(arg6,arg7,'b')
plt.xlim(float(entries[14].get()),float(entries[15].get()))
plt.ylim(float(entries[16].get()),float(entries[17].get()))
plt.xlabel(entries[18].get())
plt.ylabel(entries[19].get())
plt.title(entries[20].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.savefig(os.path.join(path_tot,'mdfa.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(221)
a.plot(np.log(arg0),np.log(arg1[0,:]),'b.')
a.plot(np.log(arg0),arg2[:,0],'b',label = 'q = -3')
a.plot(np.log(arg0),np.log(arg1[50,:]),'r.')
a.plot(np.log(arg0),arg2[:,50],'r',label = 'q = 0')
a.plot(np.log(arg0),np.log(arg1[-1,:]),'g.')
a.plot(np.log(arg0),arg2[:,-1],'g',label = 'q = 3')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get())
a.set_ylabel(entries[5].get())
a.set_title(entries[6].get())
b = fig_ts.add_subplot(222)
b.plot(arg3,arg4,'b',label = 'H(q)')
b.plot((arg3[0],arg3[-1]),(arg5,arg5),'k',label = 'H')
b.legend(loc = 0)
b.set_xlim(float(entries[7].get()),float(entries[8].get()))
b.set_ylim(float(entries[9].get()),float(entries[10].get()))
b.set_xlabel(entries[11].get())
b.set_ylabel(entries[12].get())
b.set_title(entries[13].get())
c = fig_ts.add_subplot(223)
c.plot(arg6,arg7,'b')
c.set_xlim(float(entries[14].get()),float(entries[15].get()))
c.set_ylim(float(entries[16].get()),float(entries[17].get()))
c.set_xlabel(entries[18].get())
c.set_ylabel(entries[19].get())
c.set_title(entries[20].get())
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.75)))
top.wm_title("MFDFA")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X1 Limit (left)","X1 Limit (right)","Y1 Limit (bottom)","Y1 Limit (top)","X1 Label","Y1 Label","Title1",
"X2 Limit (left)","X2 Limit (right)","Y2 Limit (bottom)","Y2 Limit (top)","X2 Label","Y2 Label","Title2",
"X3 Limit (left)","X3 Limit (right)","Y3 Limit (bottom)","Y3 Limit (top)","X3 Label","Y3 Label","Title3"]
values = [np.log(arg0[0]),np.log(arg0[-1]),np.min([np.min(np.log(arg1[0,:])),np.min(arg2[:,0]),
np.min(np.log(arg1[50,:])),np.min(arg2[:,50]),np.min(np.log(arg1[-1,:])),np.min(arg2[:,-1])]) - 1.0,
np.max([np.max(np.log(arg1[0,:])),np.max(arg2[:,0]),np.max(np.log(arg1[50,:])),np.max(arg2[:,50]),
np.max(np.log(arg1[-1,:])),np.max(arg2[:,-1])]) + 1.0,'log(n)','log(F(n))','MDFA fit',
arg3[0],arg3[-1],np.min(arg4) - 0.1,np.max(arg4) + 0.1,'q','H(q)','Generalised Hurst exponent',
np.min(arg6) - 0.2,np.max(arg6) + 0.2,np.min(arg7) - 0.2,1.2,'$\\alpha$','$f(\\alpha)$',
'Singularity spectrum']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = i,column = 1)
screen_fig()
tk.Label(frame_2,text = "").grid(row = len(names),column = 0)
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = len(names) + 1,column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = len(names) + 1,column = 1)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## MFDFA2_PLOT ##################################
#############################################################################
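# Argument sketch for MFDFA2_plot (inferred from use): arg0 -> local Hurst
# exponent series H_t, drawn on a black background against reference lines at
# 0.5, 1.0 and 1.5; arg1/arg2 -> support and values of the empirical P(H_t);
# arg3 -> a fitted distribution over the same support; arg4/arg5 -> mean and
# standard deviation as strings (they are concatenated into legend labels).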
def MFDFA2_plot(main_win,arg0,arg1,arg2,arg3,arg4,arg5):
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (12,9))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,1,1)
ax = plt.gca()
if int(matplotlib.__version__.split('.')[0]) >= 2:
ax.set_facecolor('black')
else:
ax.set_axis_bgcolor('black')
plt.plot(arg0,'y')
plt.plot(0.5 * np.ones((len(arg0),)),'w')
plt.plot(np.ones((len(arg0),)),'m')
plt.plot(1.5 * np.ones((len(arg0),)),'r')
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.3)
plt.subplot(2,1,2)
plt.plot(arg1,arg2,'b',label = '$\\mu$ = ' + arg4)
plt.plot(arg1,arg3,'r',linewidth = 2.0,label = '$\\sigma$ = ' + arg5)
plt.legend(loc = 0)
plt.xlim(float(entries[7].get()),float(entries[8].get()))
plt.ylim(float(entries[9].get()),float(entries[10].get()))
plt.ylabel(entries[11].get())
plt.xlabel(entries[12].get())
plt.title(entries[13].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.3)
plt.savefig(os.path.join(path_tot,'MFDFA2.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(211)
ax = fig_ts.gca()
if int(matplotlib.__version__.split('.')[0]) >= 2:
ax.set_facecolor('black')
else:
ax.set_axis_bgcolor('black')
a.plot(arg0,'y')
a.plot(0.5 * np.ones((len(arg0),)),'w')
a.plot(np.ones((len(arg0),)),'m')
a.plot(1.5 * np.ones((len(arg0),)),'r')
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get())
a.set_ylabel(entries[5].get())
a.set_title(entries[6].get())
b = fig_ts.add_subplot(212)
b.plot(arg1,arg2,'b',label = '$\\mu$ = ' + arg4)
b.plot(arg1,arg3,'r',linewidth = 2.0,label = '$\\sigma$ = ' + arg5)
b.legend(loc = 0)
b.set_xlim(float(entries[7].get()),float(entries[8].get()))
b.set_ylim(float(entries[9].get()),float(entries[10].get()))
b.set_ylabel(entries[11].get())
b.set_xlabel(entries[12].get())
b.set_title(entries[13].get())
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("DFA")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X1 Limit (left)","X1 Limit (right)","Y1 Limit (bottom)","Y1 Limit (top)","X1 Label (top)",
"Y1 Label (top)","Title1 (top)","X2 Limit (left)","X2 Limit (right)","Y2 Limit (bottom)",
"Y2 Limit (top)","X2 Label (top)","Y2 Label (bottom)","Title2 (bottom)"]
values = [0,len(arg0),0,3,'time','$H_t$','local Hurst exponent',np.min(arg1) - 0.2,np.max(arg1) + 0.2,
0,np.max(arg2) * 11 / 10,'P($H_t$)','$H_t$','Prob distr of $H_t$']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = i,column = 1)
screen_fig()
tk.Label(frame_2,text = "").grid(row = len(names),column = 0)
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = len(names) + 1,column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = len(names) + 1,column = 1)
tk.Label(frame_2,text = "").grid(row = len(names) + 2,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = len(names) + 3,column = 0)
#############################################################################
[per-file numeric quality-signal columns omitted: unlabeled statistics]
--- next record ---
hexsha: 4c18328fc80b46e684fdcbe227c90aa8fd0e06bd | size: 29016 bytes | ext: py | lang: Python
file: openmdao/core/tests/test_des_vars_responses.py
repo: Subraiz/OpenMDAO @ ba247746e76fc3a46b768d0f09955ef58ee71ae4 | licenses: ["Apache-2.0"]
star/issue/fork counts and dates: null (identical repo fields across the stars/issues/forks variants)
""" Unit tests for the design_variable and response interface to system."""
import unittest
import numpy as np
from openmdao.api import Problem, NonlinearBlockGS, Group, IndepVarComp, ExecComp, ScipyKrylov, \
ScipyOptimizeDriver
from openmdao.utils.assert_utils import assert_rel_error
from openmdao.utils.mpi import MPI
from openmdao.test_suite.components.sellar import SellarDerivatives, SellarDis1withDerivatives, \
SellarDis2withDerivatives
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class TestDesVarsResponses(unittest.TestCase):
def test_api_on_model(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1')
prob.model.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj'})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
def test_api_response_on_model(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_response('obj', type_="obj")
prob.model.add_response('con1', type_="con")
prob.model.add_response('con2', type_="con")
prob.setup()
des_vars = prob.model.get_design_vars()
responses = prob.model.get_responses()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj'})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
self.assertEqual(set(responses.keys()), {'obj_cmp.obj', 'con_cmp1.con1', 'con_cmp2.con2'})
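# Note: responses registered via add_response(type_="obj"/"con") are visible
# both through the generic get_responses() accessor and through the
# type-specific get_objectives()/get_constraints() accessors, as asserted above.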
def test_api_list_on_model(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=[-100, -20], upper=[100, 20])
prob.model.add_objective('obj')
prob.model.add_constraint('con1')
prob.model.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj',})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
def test_api_array_on_model(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z',
lower=np.array([-100, -20]),
upper=np.array([100, 20]))
prob.model.add_objective('obj')
prob.model.add_constraint('con1')
prob.model.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj',})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
def test_api_iter_on_model(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=range(-101, -99),
upper=range(99, 101),
indices=range(2))
prob.model.add_objective('obj')
prob.model.add_constraint('con1')
prob.model.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj',})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
def test_api_on_subsystems(self):
prob = Problem()
model = prob.model
model.add_subsystem('px', IndepVarComp('x', 1.0))
model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])))
model.add_subsystem('d1', SellarDis1withDerivatives())
model.add_subsystem('d2', SellarDis2withDerivatives())
model.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0))
model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'))
model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'))
model.connect('px.x', ['d1.x', 'obj_cmp.x'])
model.connect('pz.z', ['d1.z', 'd2.z', 'obj_cmp.z'])
model.connect('d1.y1', ['d2.y1', 'obj_cmp.y1', 'con_cmp1.y1'])
model.connect('d2.y2', ['d1.y2', 'obj_cmp.y2', 'con_cmp2.y2'])
model.nonlinear_solver = NonlinearBlockGS()
model.linear_solver = ScipyKrylov()
px = prob.model.px
px.add_design_var('x', lower=-100, upper=100)
pz = prob.model.pz
pz.add_design_var('z', lower=-100, upper=100)
obj = prob.model.obj_cmp
obj.add_objective('obj')
con_comp1 = prob.model.con_cmp1
con_comp1.add_constraint('con1')
con_comp2 = prob.model.con_cmp2
con_comp2.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
obj = prob.model.get_objectives()
constraints = prob.model.get_constraints()
self.assertEqual(set(des_vars.keys()), {'px.x', 'pz.z'})
self.assertEqual(set(obj.keys()), {'obj_cmp.obj',})
self.assertEqual(set(constraints.keys()), {'con_cmp1.con1', 'con_cmp2.con2'})
class TestDesvarOnModel(unittest.TestCase):
def test_design_var_not_exist(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('junk')
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception), "SellarDerivatives (<model>): Output not found for design variable 'junk'.")
def test_desvar_affine_and_scaleradder(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=-100, upper=100, ref=1.0,
scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=-100, upper=100, ref=0.0,
adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=-100, upper=100, ref0=0.0,
adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=-100, upper=100, ref0=0.0,
scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
def test_desvar_affine_mapping(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100, ref0=-100.0,
ref=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1')
prob.model.add_constraint('con2')
prob.setup()
des_vars = prob.model.get_design_vars()
x_ref0 = des_vars['px.x']['ref0']
x_ref = des_vars['px.x']['ref']
x_scaler = des_vars['px.x']['scaler']
x_adder = des_vars['px.x']['adder']
self.assertAlmostEqual( x_scaler*(x_ref0 + x_adder), 0.0, places=12)
self.assertAlmostEqual( x_scaler*(x_ref + x_adder), 1.0, places=12)
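# The two identities above follow from the (assumed) ref/ref0 conversion
# adder = -ref0 and scaler = 1 / (ref - ref0):
#   scaler * (ref0 + adder) = (ref0 - ref0) / (ref - ref0) = 0
#   scaler * (ref  + adder) = (ref  - ref0) / (ref - ref0) = 1
# so ref0 maps to 0 and ref maps to 1 in driver-scaled space.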
def test_desvar_inf_bounds(self):
# make sure there is no overflow when no upper/lower bound is specified and scaling is significant
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', scaler=1e6)
prob.model.add_objective('obj', scaler=1e6)
prob.model.add_constraint('con1', scaler=1e6)
prob.model.add_constraint('con2', scaler=1e6)
prob.setup()
des_vars = prob.model.get_design_vars()
self.assertFalse(np.isinf(des_vars['px.x']['upper']))
self.assertFalse(np.isinf(-des_vars['px.x']['lower']))
responses = prob.model.get_responses()
self.assertFalse(np.isinf(responses['con_cmp1.con1']['upper']))
self.assertFalse(np.isinf(responses['con_cmp2.con2']['upper']))
self.assertFalse(np.isinf(-responses['con_cmp1.con1']['lower']))
self.assertFalse(np.isinf(-responses['con_cmp2.con2']['lower']))
def test_desvar_invalid_name(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_design_var(42, lower=-100, upper=100, ref0=-100.0,
ref=100)
self.assertEqual(str(context.exception), 'SellarDerivatives: The name argument should '
'be a string, got 42')
def test_desvar_invalid_bounds(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_design_var('x', lower='foo', upper=[0, 100],
ref0=-100.0, ref=100)
self.assertEqual(str(context.exception), 'Expected values of lower to be an '
'Iterable of numeric values, '
'or a scalar numeric value. '
'Got foo instead.')
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=0.0, upper=['a', 'b'],
ref0=-100.0, ref=100)
class TestConstraintOnModel(unittest.TestCase):
def test_constraint_not_exist(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_constraint('junk')
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception), "SellarDerivatives (<model>): Output not found for response 'junk'.")
def test_constraint_affine_and_scaleradder(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=-100, upper=100, ref=1.0,
scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=-100, upper=100, ref=0.0,
adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('x', lower=-100, upper=100, ref0=0.0,
adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=-100, upper=100, ref0=0.0,
scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
def test_constraint_affine_mapping(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', lower=-100, upper=100, ref0=-100.0,
ref=100)
prob.model.add_constraint('con2')
prob.setup()
constraints = prob.model.get_constraints()
con1_ref0 = constraints['con_cmp1.con1']['ref0']
con1_ref = constraints['con_cmp1.con1']['ref']
con1_scaler = constraints['con_cmp1.con1']['scaler']
con1_adder = constraints['con_cmp1.con1']['adder']
self.assertAlmostEqual( con1_scaler*(con1_ref0 + con1_adder), 0.0,
places=12)
self.assertAlmostEqual( con1_scaler*(con1_ref + con1_adder), 1.0,
places=12)
def test_constraint_invalid_bounds(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_design_var('x', lower='foo', upper=[0, 100],
ref0=-100.0, ref=100)
self.assertEqual(str(context.exception), 'Expected values of lower to'
' be an Iterable of numeric'
' values, or a scalar numeric'
' value. Got foo instead.')
with self.assertRaises(ValueError) as context:
prob.model.add_design_var('x', lower=0.0, upper=['a', 'b'],
ref0=-100.0, ref=100)
def test_constraint_invalid_name(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_constraint(42, lower=-100, upper=100, ref0=-100.0,
ref=100)
self.assertEqual(str(context.exception), 'SellarDerivatives: The name argument should '
'be a string, got 42')
def test_constraint_invalid_lower(self):
prob = Problem()
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
with self.assertRaises(TypeError) as context:
prob.model.add_constraint('con1', lower='foo', upper=[0, 100],
ref0=-100.0, ref=100)
with self.assertRaises(TypeError) as context2:
prob.model.add_constraint('con1', lower=['zero', 5], upper=[0, 100],
ref0=-100.0, ref=100)
msg = ("Argument 'lower' can not be a string ('foo' given). You can not "
"specify a variable as lower bound. You can only provide constant "
"float values")
self.assertEqual(str(context.exception), msg)
msg2 = ("Argument 'lower' can not be a string ('['zero', 5]' given). You can not "
"specify a variable as lower bound. You can only provide constant "
"float values")
self.assertEqual(str(context2.exception), msg2)
def test_constraint_invalid_upper(self):
prob = Problem()
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
with self.assertRaises(TypeError) as context:
prob.model.add_constraint('con1', lower=0, upper='foo',
ref0=-100.0, ref=100)
with self.assertRaises(TypeError) as context2:
prob.model.add_constraint('con1', lower=0, upper=[1, 'foo'],
ref0=-100.0, ref=100)
msg = ("Argument 'upper' can not be a string ('foo' given). You can not "
"specify a variable as upper bound. You can only provide constant "
"float values")
self.assertEqual(str(context.exception), msg)
msg2 = ("Argument 'upper' can not be a string ('[1, 'foo']' given). You can not "
"specify a variable as upper bound. You can only provide constant "
"float values")
self.assertEqual(str(context2.exception), msg2)
def test_constraint_invalid_equals(self):
prob = Problem()
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
with self.assertRaises(TypeError) as context:
prob.model.add_constraint('con1', equals='foo')
with self.assertRaises(TypeError) as context2:
prob.model.add_constraint('con1', equals=[1, 'two'])
msg = ("Argument 'equals' can not be a string ('foo' given). You can "
"not specify a variable as equals bound. You can only provide "
"constant float values")
self.assertEqual(str(context.exception), msg)
msg2 = ("Argument 'equals' can not be a string ('[1, 'two']' given). You can "
"not specify a variable as equals bound. You can only provide "
"constant float values")
self.assertEqual(str(context2.exception), msg2)
def test_constraint_invalid_indices(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=0.0, upper=5.0,
indices='foo')
self.assertEqual(str(context.exception), 'SellarDerivatives: If specified, response indices must '
'be a sequence of integers.')
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=0.0, upper=5.0,
indices=1)
self.assertEqual(str(context.exception), 'SellarDerivatives: If specified, response indices must '
'be a sequence of integers.')
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=0.0, upper=5.0,
indices=[1, 'k'])
self.assertEqual(str(context.exception), 'SellarDerivatives: If specified, response indices must '
'be a sequence of integers.')
# passing an iterator for indices should be valid
prob.model.add_constraint('con1', lower=0.0, upper=5.0,
indices=range(2))
def test_error_eq_ineq_con(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(ValueError) as context:
prob.model.add_constraint('con1', lower=0.0, upper=5.0, equals=3.0,
indices='foo')
msg = "SellarDerivatives: Constraint 'con1' cannot be both equality and inequality."
self.assertEqual(str(context.exception), msg)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class TestAddConstraintMPI(unittest.TestCase):
N_PROCS = 2
def test_add_bad_con(self):
# From a bug report: this message didn't work under MPI.
prob = Problem()
model = prob.model
sub = model.add_subsystem('sub', SellarDerivatives())
sub.nonlinear_solver = NonlinearBlockGS()
sub.add_constraint('d1.junk', equals=0.0, cache_linear_solution=True)
with self.assertRaises(RuntimeError) as context:
prob.setup(mode='rev')
msg = "SellarDerivatives (sub): Output not found for response 'd1.junk'."
self.assertEqual(str(context.exception), msg)
class TestObjectiveOnModel(unittest.TestCase):
def test_objective_not_exist(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_objective('junk')
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"SellarDerivatives (<model>): Output not found for response 'junk'.")
def test_objective_affine_and_scaleradder(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_objective('con1', lower=-100, upper=100, ref=1.0,
scaler=0.5)
self.assertEqual(str(context.exception),
"add_objective() got an unexpected keyword argument 'lower'")
with self.assertRaises(ValueError) as context:
prob.model.add_objective('con1', ref=0.0, scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_objective('con1', ref=0.0, adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_objective('x', ref0=0.0, adder=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
with self.assertRaises(ValueError) as context:
prob.model.add_objective('con1', ref0=0.0, scaler=0.5)
self.assertEqual(str(context.exception), 'Inputs ref/ref0 are mutually'
' exclusive with'
' scaler/adder')
def test_objective_affine_mapping(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj', ref0=1000, ref=1010)
prob.model.add_objective('con2')
prob.setup()
objectives = prob.model.get_objectives()
obj_ref0 = objectives['obj_cmp.obj']['ref0']
obj_ref = objectives['obj_cmp.obj']['ref']
obj_scaler = objectives['obj_cmp.obj']['scaler']
obj_adder = objectives['obj_cmp.obj']['adder']
self.assertAlmostEqual( obj_scaler*(obj_ref0 + obj_adder), 0.0,
places=12)
self.assertAlmostEqual( obj_scaler*(obj_ref + obj_adder), 1.0,
places=12)
def test_desvar_size_err(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
for name in ['lower', 'upper', 'adder', 'scaler', 'ref', 'ref0']:
args = {name: -np.ones(2)*100}
with self.assertRaises(Exception) as context:
prob.model.add_design_var('z', indices=[1], **args)
self.assertEqual(str(context.exception),
"SellarDerivatives: When adding design var 'z', %s should have size 1 but instead has size 2." % name)
def test_constraint_size_err(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
for name in ['lower', 'upper', 'equals', 'adder', 'scaler', 'ref', 'ref0']:
args = {name: -np.ones(2)*100}
with self.assertRaises(Exception) as context:
prob.model.add_constraint('z', indices=[1], **args)
self.assertEqual(str(context.exception),
"SellarDerivatives: When adding constraint 'z', %s should have size 1 but instead has size 2." % name)
def test_objective_size_err(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
for name in ['adder', 'scaler', 'ref', 'ref0']:
args = {name: -np.ones(2)*100}
with self.assertRaises(Exception) as context:
prob.model.add_objective('z', index=1, **args)
self.assertEqual(str(context.exception),
"SellarDerivatives: When adding objective 'z', %s should have size 1 but instead has size 2." % name)
def test_objective_invalid_name(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_objective(42, ref0=-100.0, ref=100)
self.assertEqual(str(context.exception), 'SellarDerivatives: The name argument should '
'be a string, got 42')
def test_objective_invalid_index(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
with self.assertRaises(TypeError) as context:
prob.model.add_objective('obj', index='foo')
self.assertEqual(str(context.exception), 'SellarDerivatives: If specified, objective index must be an int.')
prob.model.add_objective('obj', index=1)
if __name__ == '__main__':
unittest.main()
[per-file numeric quality-signal columns omitted: unlabeled statistics]
--- next record ---
hexsha: 4c454ec1e13a378a70f673408fb268f085c92162 | size: 3682 bytes | ext: py | lang: Python
file: magnolia/python/utils/training.py | commit: 123d0fdbe81eb7bb6d76cbe9db6f793fa3105e8a | licenses: ["Apache-2.0"]
stars: 51 (rashley-iqt/Magnolia, 2016-12-16 to 2020-11-30) | issues: 30 (rashley-iqt/Magnolia, 2016-12-22 to 2017-12-11) | forks: 35 (Lab41/Magnolia, 2016-12-16 to 2021-03-27)
import numpy as np
def preprocess_l41_regression_batch(spec_batch, mask_batch=None, specs_batch=None):
# should be dimensions of (batch size, time frame, frequency)
spec_batch = spec_batch.transpose(0, 2, 1)
scaled_spec_batch = scale_input_spectrogram_for_l41_model(spec_batch)
if mask_batch is not None and specs_batch is None:
# should be dimensions of (batch size, time frame, frequency, source)
mask_batch = mask_batch.transpose(0, 3, 2, 1)
mask_batch = convert_boolean_mask_for_l41_model(mask_batch)
return scaled_spec_batch, mask_batch
if specs_batch is not None and mask_batch is None:
# should be dimensions of (batch size, time frame, frequency, source)
specs_batch = specs_batch.transpose(0, 3, 2, 1)
return scaled_spec_batch, np.abs(specs_batch)
if specs_batch is not None and mask_batch is not None:
# should be dimensions of (batch size, time frame, frequency, source)
mask_batch = mask_batch.transpose(0, 3, 2, 1)
mask_batch = convert_boolean_mask_for_l41_model(mask_batch)
# should be dimensions of (batch size, time frame, frequency, source)
specs_batch = specs_batch.transpose(0, 3, 2, 1)
return scaled_spec_batch, mask_batch, np.abs(specs_batch)
return scaled_spec_batch
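# Return-shape summary for preprocess_l41_regression_batch (a sketch of the
# branches above): spec only -> scaled_spec; spec + mask -> (scaled_spec, mask);
# spec + specs -> (scaled_spec, |specs|); all three -> (scaled_spec, mask, |specs|).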
def preprocess_chimera_batch(spec_batch, mask_batch=None, specs_batch=None):
# should be dimensions of (batch size, time frame, frequency)
spec_batch = spec_batch.transpose(0, 2, 1)
unscaled_spec_batch = np.abs(spec_batch)
scaled_spec_batch = scale_input_spectrogram_for_l41_model(spec_batch)
if mask_batch is not None and specs_batch is None:
# should be dimensions of (batch size, time frame, frequency, source)
mask_batch = mask_batch.transpose(0, 3, 2, 1)
# mask_batch = convert_boolean_mask_for_chimera_model(mask_batch)
return unscaled_spec_batch, scaled_spec_batch, mask_batch
if specs_batch is not None and mask_batch is None:
# should be dimensions of (batch size, time frame, frequency, source)
specs_batch = specs_batch.transpose(0, 3, 2, 1)
return unscaled_spec_batch, scaled_spec_batch, np.abs(specs_batch)
if specs_batch is not None and mask_batch is not None:
# should be dimensions of (batch size, time frame, frequency, source)
mask_batch = mask_batch.transpose(0, 3, 2, 1)
# mask_batch = convert_boolean_mask_for_chimera_model(mask_batch)
# should be dimensions of (batch size, time frame, frequency, source)
specs_batch = specs_batch.transpose(0, 3, 2, 1)
return unscaled_spec_batch, scaled_spec_batch, mask_batch, np.abs(specs_batch)
return unscaled_spec_batch, scaled_spec_batch
def preprocess_l41_batch(spec_batch, mask_batch=None):
# should be dimensions of (batch size, time frame, frequency)
spec_batch = spec_batch.transpose(0, 2, 1)
spec_batch = scale_input_spectrogram_for_l41_model(spec_batch)
if mask_batch is not None:
# should be dimensions of (batch size, time frame, frequency, source)
mask_batch = mask_batch.transpose(0, 3, 2, 1)
mask_batch = convert_boolean_mask_for_l41_model(mask_batch)
return spec_batch, mask_batch
return spec_batch
def scale_input_spectrogram_for_l41_model(spec_batch):
spec_batch = np.sqrt(np.abs(spec_batch))
return (spec_batch - spec_batch.min())/(spec_batch.max() - spec_batch.min())
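# Worked example (illustrative numbers): for spec values [0, 4, 16], np.sqrt
# gives [0, 2, 4] and min-max scaling maps them to [0.0, 0.5, 1.0]. The min and
# max are taken over the whole batch, so the scaling is global, not per-example.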
def convert_boolean_mask_for_l41_model(mask_batch):
return 2.0*mask_batch.astype(float) - 1.0
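# Worked example: np.array([True, False]) -> 2.0*[1., 0.] - 1.0 = [ 1., -1.],
# i.e. the boolean mask is converted to bipolar +/-1 targets.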
# def convert_boolean_mask_for_chimera_model(mask_batch):
# return mask_batch.astype(float)
[per-file numeric quality-signal columns omitted: unlabeled statistics]
--- next record ---
hexsha: 4c6b0321fcebb0ede9c8cb42b6421abfd8b63fbe | size: 7188 bytes | ext: py | lang: Python
file: main_Experiement_SensitivityAnalysis.py
repo: MaximilianJanetschek/Urban_Intermodal_Transportation @ 632caf668636448dc9290d54cf1c7b527c68a957 | licenses: ["MIT"]
star/issue/fork counts and dates: null (identical repo fields across the stars/issues/forks variants)
from Utilities.Data_Retrieval import *
from Utilities.Requests import *
from Utilities.Parameters import *
from Utilities.Solution_Procedure import *
run_CS = False
run_TS = True
run_PS = False
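# Only the sweeps whose flag above is True are executed. Judging from the
# variable names below, CS/TS/PS select the cost-, time- and physically-
# sensitive beta sets respectively (an inference, not stated in the source).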
# beta experiment on the CS beta setup:
if run_CS:
CSbetaset =[[1,2,3,12,2.5],
[1,2,3,14,2.5],
[1,2,3,17,2.5],
[1,2,3,20,2.5],
[1,2,3,22,2.5],
[1,2,3,24,2.5],
[1,2,3,27,2.5],
[1,2,3,30,2.5],
[1,2,3,34,2.5],
[1,2,3,40,2.5],
[1,2,3,44,2.5],
[1,2,3,54,2.5],
[1,2,3,60,2.5]]
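# In CSbetaset only the fourth beta component is swept (from 12 up to 60); the
# other four components stay fixed at 1, 2, 3 and 2.5.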
for i in range(0,len(CSbetaset)):
if i >= 0:
# prepare parameters
CaseParameters = Parameters(varCost_Taxi=0.0023, fixCostTaxi=3.9, fixCost_PublicTransport=2.9, fixCost_Bike=1.5,
maxNumber_of_Changes=4, beta=CSbetaset[i], waiting_time_bike=4, waiting_time_drive=4)
# generate an instance - see class Data Retrieval for details
BerlinInstance = InstanceNetwork(place='Berlin, Germany', networks=['drive', 'walk', 'bike']) # also possible drive and bike
BerlinInstance.generateMultiModalGraph(parameters=CaseParameters.dictOfParameters)
# get requests
initializeRequests()
requests = getNumberRequests(BerlinInstance, 300)
# run model
total_number = len(requests)
counter = 1
start_time = time.time()
for request_number in range(0, len(requests)):
if request_number >= 271:
request = requests[request_number]
try:
origin_point = (request.get('fromLat'), request.get('fromLon'))
destination_point = (request.get('toLat'), request.get('toLon'))
tourMulti = multi_mode_optimization_in_Arc_fromulation(origin_point, destination_point, BerlinInstance,
CaseParameters.dictOfParameters)
print(str(counter) +' out of ' + str(total_number) + ' requests are calculated')
print("--- %s seconds ---" % round((time.time() - start_time), 2))
except nx.NetworkXNoPath:
print("this does not work")
print(str(counter) + ' out of ' + str(total_number) + ' requests are calculated')
print("--- %s seconds ---" % round((time.time() - start_time), 2))
counter += 1
else:
print('skipping set ' + str(i) + ' (already generated)')
# beta experiment on the TS beta setup:
if run_TS:
TSbetaset = [[1,2,3,2,2.5],
# [1,2,3,1.75,2.5],
[1,2,3,1.7,2.5]]
'''
[[1,2,3,9,2.5],
[1,2,3,6,2.5],
[1,2,3,3,2.5],
[1,2,3,1,2.5],
[2,4,6,1,5],
[3,6,9,1,7.5],
[4,8,12,1,10],
[5,10,15,1,12.5],
[6,12,18,1,15]]
'''
for i in range(0,len(TSbetaset)):
# set counter to last finished beta set, set to large number to skip physical test run
if i >= 0:
# prepare parameters
CaseParameters = Parameters(varCost_Taxi=0.0023, fixCostTaxi=3.9, fixCost_PublicTransport=2.9, fixCost_Bike=1.5,
maxNumber_of_Changes=4, beta=TSbetaset[i], waiting_time_bike=4, waiting_time_drive=4)
# generate an instance - see class Data Retrieval for details
BerlinInstance = InstanceNetwork(place='Berlin, Germany', networks=['drive', 'walk', 'bike']) # also possible drive and bike
BerlinInstance.generateMultiModalGraph(parameters=CaseParameters.dictOfParameters)
# get requests
initializeRequests()
requests = getNumberRequests(BerlinInstance, 300)
# run model
total_number = len(requests)
counter = 1
start_time = time.time()
for request in requests:
try:
origin_point = (request.get('fromLat'), request.get('fromLon'))
destination_point = (request.get('toLat'), request.get('toLon'))
tourMulti = multi_mode_optimization_in_Arc_fromulation(origin_point, destination_point, BerlinInstance,
CaseParameters.dictOfParameters)
print(str(counter) +' out of ' + str(total_number) + ' requests are calculated')
print("--- %s seconds ---" % round((time.time() - start_time), 2))
counter += 1
except nx.NetworkXNoPath:
print("this does not work")
# beta experiment on the PS (physically sensitive) beta setup:
if run_PS:
PSbetaset =[[1,2,3,12,2.5],
[1,3,3,12,3.75],
[1,5,3,12,6.25],
[1,6,3,12,7.5],
[1,7,3,12,8.75],
[1,8,3,12,10],
[1,9,3,12,11.25],
[1,10,3,12,12.5],
[1,11,3,12,13.75],
[1,12,3,12,15],
[1,14,3,12,17.5],
[1,16,3,12,20],
[1,18,3,12,22.5]]
for i in range(0,len(PSbetaset)):
# set counter to last finished beta set, set to large number to skip physical test run
if i >= 0:
# prepare parameters
CaseParameters = Parameters(varCost_Taxi=0.0023, fixCostTaxi=3.9, fixCost_PublicTransport=2.9, fixCost_Bike=1.5,
maxNumber_of_Changes=4, beta=PSbetaset[i], waiting_time_bike=4, waiting_time_drive=4)
# generate an instance - see class Data Retrieval for details
BerlinInstance = InstanceNetwork(place='Berlin, Germany', networks=['drive', 'walk', 'bike']) # also possible drive and bike
BerlinInstance.generateMultiModalGraph(parameters=CaseParameters.dictOfParameters)
# get requests
initializeRequests()
requests = getNumberRequests(BerlinInstance, 300)
# run model
total_number = len(requests)
counter = 1
start_time = time.time()
for request in requests:
try:
origin_point = (request.get('fromLat'), request.get('fromLon'))
destination_point = (request.get('toLat'), request.get('toLon'))
tourMulti = multi_mode_optimization_in_Arc_fromulation(origin_point, destination_point, BerlinInstance,
CaseParameters.dictOfParameters)
print(str(counter) +' out of ' + str(total_number) + ' requests are calculated')
print("--- %s seconds ---" % round((time.time() - start_time), 2))
counter += 1
except nx.NetworkXNoPath:
print("this does not work")
[per-file numeric quality-signal columns omitted: unlabeled statistics]
--- next record ---
hexsha: 4c730fa34525821f2f1eec318b7c26f55243b178 | size: 25 bytes | ext: py | lang: Python
file: calibration/__init__.py
repo: najafian-lab/em-calibration @ 81693ddbf87e642cd66a0b375e25ca378c2752a8 | licenses: ["MIT"]
stars: 1 (2021-07-05) | issue/fork counts and dates: null
from calibration import *
[per-file numeric quality-signal columns omitted: unlabeled statistics]
--- next record ---
hexsha: 910232a890e7bcb434bd5f904977f42ab169fbca | size: 15011 bytes | ext: py | lang: Python
file: sdk/python/pulumi_azure/network/network_security_group.py
repo: adnang/pulumi-azure @ 32360d2f1e41e27d7fdd6522cb26d65e531f279f | licenses: ["ECL-2.0", "Apache-2.0"]
star/issue/fork counts and dates: null (identical repo fields across the stars/issues/forks variants)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NetworkSecurityGroup(pulumi.CustomResource):
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
name: pulumi.Output[str]
"""
The name of the security rule.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the network security group. Changing this forces a new resource to be created.
"""
security_rules: pulumi.Output[list]
"""
A list of objects representing security rules, as defined below.
* `access` (`str`) - Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`.
* `description` (`str`) - A description for this rule. Restricted to 140 characters.
* `destination_address_prefix` (`str`) - CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `destination_address_prefixes` is not specified.
* `destination_address_prefixes` (`list`) - List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified.
* `destination_application_security_group_ids` (`list`) - A List of destination Application Security Group ID's
* `destination_port_range` (`str`) - Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified.
* `destination_port_ranges` (`list`) - List of destination ports or port ranges. This is required if `destination_port_range` is not specified.
* `direction` (`str`) - The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`.
* `name` (`str`) - The name of the security rule.
* `priority` (`float`) - Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
* `protocol` (`str`) - Network protocol this rule applies to. Can be `Tcp`, `Udp`, `Icmp`, or `*` to match all.
* `source_address_prefix` (`str`) - CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified.
* `source_address_prefixes` (`list`) - List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified.
* `source_application_security_group_ids` (`list`) - A List of source Application Security Group ID's
* `source_port_range` (`str`) - Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified.
* `source_port_ranges` (`list`) - List of source ports or port ranges. This is required if `source_port_range` is not specified.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a network security group that contains a list of network security rules. Network security groups enable inbound or outbound traffic to be enabled or denied.
> **NOTE on Network Security Groups and Network Security Rules:** This provider currently
provides both a standalone Network Security Rule resource, and allows for Network Security Rules to be defined in-line within the Network Security Group resource.
At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_network_security_group = azure.network.NetworkSecurityGroup("exampleNetworkSecurityGroup",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
security_rule=[{
"name": "test123",
"priority": 100,
"direction": "Inbound",
"access": "Allow",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "*",
"sourceAddressPrefix": "*",
"destinationAddressPrefix": "*",
}],
tags={
"environment": "Production",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the security rule.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the network security group. Changing this forces a new resource to be created.
:param pulumi.Input[list] security_rules: A list of objects representing security rules, as defined below.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **security_rules** object supports the following:
* `access` (`pulumi.Input[str]`) - Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`.
* `description` (`pulumi.Input[str]`) - A description for this rule. Restricted to 140 characters.
* `destination_address_prefix` (`pulumi.Input[str]`) - CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `destination_address_prefixes` is not specified.
* `destination_address_prefixes` (`pulumi.Input[list]`) - List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified.
* `destination_application_security_group_ids` (`pulumi.Input[list]`) - A list of destination Application Security Group IDs.
* `destination_port_range` (`pulumi.Input[str]`) - Destination port or range. An integer, a range between `0` and `65535`, or `*` to match any port. This is required if `destination_port_ranges` is not specified.
* `destination_port_ranges` (`pulumi.Input[list]`) - List of destination ports or port ranges. This is required if `destination_port_range` is not specified.
* `direction` (`pulumi.Input[str]`) - Specifies whether the rule is evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`.
* `name` (`pulumi.Input[str]`) - The name of the security rule.
* `priority` (`pulumi.Input[float]`) - Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
* `protocol` (`pulumi.Input[str]`) - Network protocol this rule applies to. Can be `Tcp`, `Udp`, `Icmp`, or `*` to match all.
* `source_address_prefix` (`pulumi.Input[str]`) - CIDR, source IP range, or `*` to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used. This is required if `source_address_prefixes` is not specified.
* `source_address_prefixes` (`pulumi.Input[list]`) - List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified.
* `source_application_security_group_ids` (`pulumi.Input[list]`) - A list of source Application Security Group IDs.
* `source_port_range` (`pulumi.Input[str]`) - Source port or range. An integer, a range between `0` and `65535`, or `*` to match any port. This is required if `source_port_ranges` is not specified.
* `source_port_ranges` (`pulumi.Input[list]`) - List of source ports or port ranges. This is required if `source_port_range` is not specified.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['security_rules'] = security_rules
__props__['tags'] = tags
super(NetworkSecurityGroup, __self__).__init__(
'azure:network/networkSecurityGroup:NetworkSecurityGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None):
"""
Get an existing NetworkSecurityGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the network security group. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the network security group. Changing this forces a new resource to be created.
:param pulumi.Input[list] security_rules: A list of objects representing security rules, as defined below.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **security_rules** object supports the following:
* `access` (`pulumi.Input[str]`) - Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`.
* `description` (`pulumi.Input[str]`) - A description for this rule. Restricted to 140 characters.
* `destination_address_prefix` (`pulumi.Input[str]`) - CIDR, destination IP range, or `*` to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used. This is required if `destination_address_prefixes` is not specified.
* `destination_address_prefixes` (`pulumi.Input[list]`) - List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified.
* `destination_application_security_group_ids` (`pulumi.Input[list]`) - A list of destination Application Security Group IDs.
* `destination_port_range` (`pulumi.Input[str]`) - Destination port or range. An integer, a range between `0` and `65535`, or `*` to match any port. This is required if `destination_port_ranges` is not specified.
* `destination_port_ranges` (`pulumi.Input[list]`) - List of destination ports or port ranges. This is required if `destination_port_range` is not specified.
* `direction` (`pulumi.Input[str]`) - Specifies whether the rule is evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`.
* `name` (`pulumi.Input[str]`) - The name of the security rule.
* `priority` (`pulumi.Input[float]`) - Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
* `protocol` (`pulumi.Input[str]`) - Network protocol this rule applies to. Can be `Tcp`, `Udp`, `Icmp`, or `*` to match all.
* `source_address_prefix` (`pulumi.Input[str]`) - CIDR, source IP range, or `*` to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used. This is required if `source_address_prefixes` is not specified.
* `source_address_prefixes` (`pulumi.Input[list]`) - List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified.
* `source_application_security_group_ids` (`pulumi.Input[list]`) - A list of source Application Security Group IDs.
* `source_port_range` (`pulumi.Input[str]`) - Source port or range. An integer, a range between `0` and `65535`, or `*` to match any port. This is required if `source_port_ranges` is not specified.
* `source_port_ranges` (`pulumi.Input[list]`) - List of source ports or port ranges. This is required if `source_port_range` is not specified.
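For example, an existing group can be looked up by its Azure resource ID (a sketch; the subscription ID below is a placeholder):
```python
existing = azure.network.NetworkSecurityGroup.get("existing",
    id="/subscriptions/<subscription-id>/resourceGroups/example-resources/providers/Microsoft.Network/networkSecurityGroups/example-nsg")
```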
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = location
__props__["name"] = name
__props__["resource_group_name"] = resource_group_name
__props__["security_rules"] = security_rules
__props__["tags"] = tags
return NetworkSecurityGroup(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
[record quality-signal columns omitted]
--- dense_coattn/util/__init__.py (yuzhiw/Dense-CoAttention-Network @ 4bd82682, Python, 124 bytes, MIT, 1 star 2018-11-17) ---
from .utils import Initializer, Drawer, Saver, Timer, Meter
__all__ = ["Initializer", "Drawer", "Saver", "Timer", "Meter"]
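# Re-export the helpers defined in utils so callers can simply write
# `from dense_coattn.util import Timer` (or any other name in __all__).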
[record quality-signal columns omitted]
--- onyx/database/__init__.py (mudkipdev/onyx @ 333d23c1, Python, 31 bytes, MIT) ---
from .guild import CustomGuild
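# Re-export CustomGuild at package level, e.g. `from onyx.database import CustomGuild`.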
[record quality-signal columns omitted]
--- app/genetron/__init__.py (cangfengzhe/flask_genetron @ 792a22bc, Python, 93 bytes, MIT, 1 star 2016-12-12) ---
from flask import Blueprint
genetron = Blueprint('genetron', __name__)
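# Views are imported at the bottom so the `genetron` blueprint already exists
# when the views module (which typically imports it back) is loaded; this is
# the usual Flask pattern for avoiding a circular import.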
from . import views
[record quality-signal columns omitted]
--- tests/gamestonk_terminal/stocks/options/test_tradier_view.py (minhhoang1023/GamestonkTerminal @ 195dc19b, Python, 5,028 bytes, MIT) ---
# THIRD-PARTY IMPORTS
import pytest
# INTERNAL IMPORTS
from gamestonk_terminal.stocks.options import tradier_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("Authorization", "MOCK_TOKEN")],
}
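# NOTE: `vcr_config` is the configuration fixture read by the VCR-based
# recording plugin in use (pytest-recording exposes a fixture of this name);
# it replaces the real Tradier Authorization header with MOCK_TOKEN so no
# credentials end up in recorded cassettes.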
@pytest.mark.vcr(record_mode="none")
def test_red_highlight(recorder):
result = tradier_view.red_highlight(val="MOCK TEXT")
recorder.capture(result)
@pytest.mark.vcr(record_mode="none")
def test_green_highlight(recorder):
result = tradier_view.green_highlight(val="MOCK TEXT")
recorder.capture(result)
@pytest.mark.vcr(record_mode="none")
def test_check_valid_option_chains_headers(recorder):
result = tradier_view.check_valid_option_chains_headers(headers="gamma,delta")
recorder.capture(result)
@pytest.mark.default_cassette("test_display_chains")
@pytest.mark.vcr
@pytest.mark.record_stdout
@pytest.mark.parametrize(
"calls_only, puts_only, min_sp, max_sp",
[
(True, False, 80.0, 90.0),
(False, True, 80.0, 90.0),
(True, False, -1, -1),
(False, True, -1, -1),
(False, False, -1, -1),
],
)
def test_display_chains(calls_only, max_sp, min_sp, mocker, puts_only):
# MOCK EXPORT_DATA
mocker.patch(target="gamestonk_terminal.stocks.options.tradier_view.export_data")
# MOCK USE_COLOR
mocker.patch.object(target=tradier_view.gtff, attribute="USE_COLOR", new=True)
tradier_view.display_chains(
ticker="AAPL",
expiry="2022-02-25",
to_display=["volume"],
min_sp=min_sp,
max_sp=max_sp,
calls_only=calls_only,
puts_only=puts_only,
export="",
)
@pytest.mark.default_cassette("test_plot_oi")
@pytest.mark.vcr
@pytest.mark.parametrize(
"calls_only, puts_only, min_sp, max_sp",
[
(True, False, 80.0, 90.0),
(False, True, 80.0, 90.0),
(True, False, -1, -1),
(False, True, -1, -1),
(True, True, -1, -1),
(False, False, -1, -1),
],
)
def test_plot_oi(calls_only, max_sp, min_sp, mocker, puts_only):
# MOCK CHARTS
mocker.patch(
target="gamestonk_terminal.stocks.options.tradier_view.theme.visualize_output"
)
# MOCK EXPORT_DATA
mocker.patch(target="gamestonk_terminal.stocks.options.tradier_view.export_data")
# MOCK USE_COLOR
mocker.patch.object(target=tradier_view.gtff, attribute="USE_COLOR", new=True)
tradier_view.plot_oi(
ticker="AAPL",
expiry="2022-02-25",
min_sp=min_sp,
max_sp=max_sp,
calls_only=calls_only,
puts_only=puts_only,
export="",
)
@pytest.mark.default_cassette("test_plot_oi")
@pytest.mark.vcr
@pytest.mark.parametrize(
"calls_only, puts_only, min_sp, max_sp",
[
(True, False, 80.0, 90.0),
(False, True, 80.0, 90.0),
(True, False, -1, -1),
(False, True, -1, -1),
(True, True, -1, -1),
(False, False, -1, -1),
],
)
def test_plot_vol(calls_only, max_sp, min_sp, mocker, puts_only):
# MOCK CHARTS
mocker.patch(
target="gamestonk_terminal.stocks.options.tradier_view.theme.visualize_output"
)
# MOCK EXPORT_DATA
mocker.patch(target="gamestonk_terminal.stocks.options.tradier_view.export_data")
# MOCK USE_COLOR
mocker.patch.object(target=tradier_view.gtff, attribute="USE_COLOR", new=True)
tradier_view.plot_vol(
ticker="AAPL",
expiry="2022-02-25",
min_sp=min_sp,
max_sp=max_sp,
calls_only=calls_only,
puts_only=puts_only,
export="",
)
@pytest.mark.default_cassette("test_plot_volume_open_interest")
@pytest.mark.vcr
@pytest.mark.parametrize(
"min_sp, max_sp, min_vol",
[
(80.0, 90.0, 0.0),
(-1, -1, -1),
],
)
def test_plot_volume_open_interest(max_sp, min_sp, min_vol, mocker):
# MOCK CHARTS
mocker.patch(
target="gamestonk_terminal.stocks.options.tradier_view.theme.visualize_output"
)
# MOCK EXPORT_DATA
mocker.patch(target="gamestonk_terminal.stocks.options.tradier_view.export_data")
# MOCK USE_COLOR
mocker.patch.object(target=tradier_view.gtff, attribute="USE_COLOR", new=True)
tradier_view.plot_volume_open_interest(
ticker="AAPL",
expiry="2022-02-25",
min_sp=min_sp,
max_sp=max_sp,
min_vol=min_vol,
export="",
)
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_display_historical(mocker):
# MOCK CHARTS
mocker.patch(
target="gamestonk_terminal.stocks.options.tradier_view.theme.visualize_output"
)
# MOCK EXPORT_DATA
mocker.patch(target="gamestonk_terminal.stocks.options.tradier_view.export_data")
# MOCK USE_COLOR
mocker.patch.object(target=tradier_view.gtff, attribute="USE_COLOR", new=True)
tradier_view.display_historical(
ticker="AAPL",
expiry="2022-02-25",
strike=180.0,
put=True,
export="csv",
raw=True,
chain_id="",
)
[record quality-signal columns omitted]
--- onnxoptimizer/test/optimizer_test.py (462630221/optimizer @ 8f5a6e94, Python, 113,559 bytes, Apache-2.0, 1 star 2021-02-20) ---
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from typing import Sequence, Text, Any, Tuple, List, Callable, Optional, Dict, Union
import io
import unittest
import os
import numpy as np # type: ignore
try:
import torch
import torchvision as tv
has_tv = True
except ImportError:
has_tv = False
import onnx
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, shape_inference
from onnx import numpy_helper
from onnx.numpy_helper import to_array
try:
import onnxruntime as rt
has_ort = True
except ImportError:
has_ort = False
import onnxoptimizer
TensorShape = List[int]
TensorShapes = Dict[Optional[str], TensorShape]
LATEST_STABLE_OPSET_VERSION = 13
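# For orientation, the API exercised by these tests is typically driven like
# this (a minimal sketch; the file names are hypothetical):
#   model = onnx.load("model.onnx")
#   optimized = onnxoptimizer.optimize(model, ["eliminate_identity"])
#   onnx.save(optimized, "model.opt.onnx")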
class TestOptimizer(unittest.TestCase):
def _compare(self, model_opt: onnx.ModelProto, model_ori: onnx.ModelProto, n_times: int = 5,
input_shapes: Optional[TensorShapes] = None, verbose=True) -> bool:
"""
:param input_shapes: Shapes of generated random inputs
:param model_opt: The simplified ONNX model
:param model_ori: The original ONNX model
:param n_times: Generate n random inputs
"""
def get_shape_from_value_info_proto(v: onnx.ValueInfoProto) -> List[int]:
return [dim.dim_value for dim in v.type.tensor_type.shape.dim]
def get_value_info_all(m: onnx.ModelProto, name: str) -> Optional[onnx.ValueInfoProto]:
for v in m.graph.value_info:
if v.name == name:
return v
for v in m.graph.input:
if v.name == name:
return v
for v in m.graph.output:
if v.name == name:
return v
return None
def get_shape(m: onnx.ModelProto, name: str) -> TensorShape:
"""
Note: This method relies on onnx shape inference, which is not always reliable, so only use it on input or output tensors.
"""
v = get_value_info_all(m, name)
if v is not None:
return get_shape_from_value_info_proto(v)
raise RuntimeError('Cannot get shape of "{}"'.format(name))
def get_elem_type(m: onnx.ModelProto, name: str) -> Optional[int]:
v = get_value_info_all(m, name)
if v is not None:
return v.type.tensor_type.elem_type
return None
def get_np_type_from_elem_type(elem_type: int) -> int:
# np.bool was removed from NumPy; np.bool_ is the portable spelling. The final
# np.float16 stands in for BFLOAT16, which NumPy has no native type for.
sizes = (None, np.float32, np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, str, np.bool_,
np.float16, np.double, np.uint32, np.uint64, np.complex64, np.complex128, np.float16)
assert len(sizes) == 17
size = sizes[elem_type]
assert size is not None
return size
def get_input_names(model: onnx.ModelProto) -> List[str]:
input_names = list(set([ipt.name for ipt in model.graph.input])
- set([x.name for x in model.graph.initializer]))
return input_names
def generate_rand_input(model, input_shapes: Optional[TensorShapes] = None):
if input_shapes is None:
input_shapes = {}
input_names = get_input_names(model)
full_input_shapes = {ipt: get_shape(
model, ipt) for ipt in input_names}
assert None not in input_shapes
full_input_shapes.update(input_shapes) # type: ignore
for key in full_input_shapes:
if np.prod(full_input_shapes[key]) <= 0:
raise RuntimeError(
'The shape of input "{}" has dynamic size, '
'please set an input shape manually'.format(key))
inputs = {ipt: np.array(np.random.rand(*full_input_shapes[ipt]),
dtype=get_np_type_from_elem_type(get_elem_type(model, ipt))) for ipt in
input_names}
return inputs
def forward(model, inputs=None, input_shapes: Optional[TensorShapes] = None) -> Dict[str, np.ndarray]:
if input_shapes is None:
input_shapes = {}
sess_options = rt.SessionOptions()
sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)
sess_options.log_severity_level = 3
sess = rt.InferenceSession(model.SerializeToString(
), sess_options=sess_options, providers=['CPUExecutionProvider'])
if inputs is None:
inputs = generate_rand_input(model, input_shapes=input_shapes)
outputs = [x.name for x in sess.get_outputs()]
run_options = rt.RunOptions()
run_options.log_severity_level = 3
res = OrderedDict(zip(outputs, sess.run(
outputs, inputs, run_options=run_options)))
return res
if input_shapes is None:
input_shapes = {}
onnx.checker.check_model(model_opt)
for i in range(n_times):
rand_input = generate_rand_input(
model_opt, input_shapes=input_shapes)
res_ori = forward(model_ori, inputs=rand_input)
res_opt = forward(model_opt, inputs=rand_input)
for name in res_opt.keys():
if not np.allclose(res_opt[name], res_ori[name], rtol=1e-4, atol=1e-5):
if verbose:
print("Tensor {} changes after optimization. The max diff is {}.".format(
name, np.max(np.abs(res_opt[name] - res_ori[name]))))
print("After optimization:")
print(res_opt[name])
print("Before optimization:")
print(res_ori[name])
print("----------------")
return False
return True
# type: (Union[GraphProto, ModelProto], Sequence[Text], bool, **Any) -> ModelProto
def _optimized(self, graph_or_model, opts, fixed_point=False, compare_result=True, **kwargs):
if isinstance(graph_or_model, ModelProto):
orig_model = graph_or_model
else:
opset_imports = kwargs.pop('opset_imports', None)
if opset_imports is None:
opset_imports = [helper.make_opsetid("", LATEST_STABLE_OPSET_VERSION)]
orig_model = helper.make_model(
graph_or_model, producer_name='onnx-test', opset_imports=opset_imports, **kwargs)
checker.check_model(orig_model)
optimized_model = onnxoptimizer.optimize(orig_model, opts, fixed_point)
checker.check_model(optimized_model)
if compare_result and len(optimized_model.graph.node) > 0:
if has_ort:
assert self._compare(optimized_model, orig_model)
else:
print("Skip onnxruntime test because it is not installed.")
return optimized_model
# input_types and output_types are lists of triples of (name, type, shape)
# NOTE(daquexian): only values that change across loop iterations should be in `input_types` and `output_types`. The pseudocode below shows how the Loop op works:
# loop_value_inputs = graph_value_inputs
# while cond:
# loop_value_outputs = body(loop_value_inputs)
# loop_value_inputs = loop_value_outputs
# graph_value_outputs = loop_value_outputs
def _make_fake_loop_op(self,
body_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
input_types,
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types,
check_legality=True,
): # type: (...) -> List[NodeProto]
if check_legality:
assert len(input_types) == len(output_types)
zero = helper.make_tensor(
"trip_count_value", TensorProto.INT64, (), [1])
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
# lcd is a dummy loop-carried dependency that only exists because
# right now the schema checker is broken and assumes a variadic
# input needs at least one value.
graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT64, ()),
helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in input_types:
graph_inputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
graph_outputs = [helper.make_tensor_value_info(
"cond", TensorProto.BOOL, ())]
for type, shape, name in output_types:
graph_outputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
graph_outputs)
loop_inputs = ["trip_count", "condition"]
loop_inputs.extend([name for _, _, name in input_types])
# TODO: fix checker to accept 0-input variadic inputs
if len(loop_inputs) == 2:
loop_inputs.append("")
loop_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["trip_count"], value=zero),
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
]
return retval_nodes
def _make_fake_if_op(self,
true_nodes, # type: Sequence[NodeProto]
false_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types
): # type: (...) -> List[NodeProto]
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
if_inputs = ["condition"]
if_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
else_branch=false_graph)
]
return retval_nodes
# fn is a function that takes a single node as argument
# type: (GraphProto, Callable[[NodeProto], None]) -> None
def _visit_all_nodes_recursive(self, graph, fn):
for node in graph.node:
fn(node)
for attr in node.attribute:
if attr.g is not None:
self._visit_all_nodes_recursive(attr.g, fn)
if len(attr.graphs):
for gr in attr.graphs:
self._visit_all_nodes_recursive(gr, fn)
def test_get_available_passes(self): # type: () -> None
# FIXME: does not guarantee that all passes are listed
graph = helper.make_graph([], "dummy_graph", [], [])
list_of_passes = onnxoptimizer.get_available_passes()
assert isinstance(list_of_passes, (list)) and len(list_of_passes) > 0
for pass_name in list_of_passes:
# If pass_name is invalid it throws a RuntimeError
self._optimized(graph, [pass_name])
def test_eliminate_identity_single_use(self): # type: () -> None
nodes = [helper.make_node("Add", ["X", "Y"], ["A"]),
helper.make_node("Identity", ["A"], ["B"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["_B"], ["_B2"])],
[(TensorProto.FLOAT, (5,), "B")],
[(TensorProto.FLOAT, (5,), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("B2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
# All identity nodes should have been eliminated
def check_identity(node): # type: (NodeProto) -> None
assert node.op_type != "Identity"
self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
# Use of the output from the Identity node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "B"
# Use of the output from the Identity node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_B2"
def test_eliminate_identity_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
identity = helper.make_node("Identity", ["A"], ["B"])
graph = helper.make_graph(
[add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(
optimized_model.graph.output) == 1 and optimized_model.graph.output[0].name == 'B'
assert len(optimized_model.graph.node) == 1
def test_eliminate_identity_multiple_uses(self): # type: () -> None
identity = helper.make_node("Identity", ["X"], ["Y"])
add = helper.make_node("Add", ["Z", "Y"], ["A"])
mul = helper.make_node("Mul", ["A", "Y"], ["B"])
graph = helper.make_graph(
[identity, add, mul],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 2
def test_not_fuse_non_nop_flatten(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=2)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10, 3, 1, 1))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (10, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Flatten'
def test_nop_flatten_axis0_graph_output(self):
add = helper.make_node("Add", ["X", "Y"], ["A"])
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[add, flatten],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 10)),
],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))],
# the tensor_value_info of "A" is necessary to this optimizer
value_info=[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10))]
)
# The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Add'
def test_nop_flatten_axis0(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 10))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_nop_flatten_axis1(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=1)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_eliminate_duplicate_initializer(self): # type: () -> None
add_1 = helper.make_node("Add", ["A", "I_0"], ["B"])
add_2 = helper.make_node("Add", ["B", "I_1"], ["C"])
i = np.random.rand(5).astype(np.float32)
graph = helper.make_graph(
[add_1, add_2],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_1", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (5,))],
[helper.make_tensor("I_0", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True),
helper.make_tensor("I_1", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_duplicate_initializer"])
assert len(optimized_model.graph.node) == 2
assert len(optimized_model.graph.initializer) == 1
assert len(optimized_model.graph.input) == 2
assert optimized_model.graph.node[0].input[1] == "I_0"
def test_nop_cast(self): # type: () -> None
cast = helper.make_node("Cast", ["A"], ["B"], to=TensorProto.FLOAT)
graph = helper.make_graph(
[cast],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_cast"])
assert len(optimized_model.graph.node) == 0
def test_nop_transpose_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans = helper.make_node("Transpose", ["A"], ["B"], perm=[0, 1])
graph = helper.make_graph(
[add, trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
# The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_nop_transpose(self): # type: () -> None
nodes = [helper.make_node("Identity", ["A"], ["X"]),
helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
[(TensorProto.FLOAT, (2, 3), "Y")],
[(TensorProto.FLOAT, (2, 3), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
# Use of the output from the Transpose node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "Y"
# Use of the output from the Transpose node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_Y2"
def test_nop_transpose_default(self): # type: () -> None
trans = helper.make_node("Transpose", ["X"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Transpose"
def test_nop_pad_opset10(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 0, 0])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))])
assert len(graph.node) == 1
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
pad = helper.make_node("Pad", ["A", "Pads"], ["B"])
graph = helper.make_graph(
[add, pad],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (2,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(2,),
vals=np.array([0, 0]).astype(
np.int64).tobytes(),
raw=True)])
# The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.node) == 1
def test_nop_pad(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X", "Pads"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
assert len(graph.node) == 1
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_default_opset10(self): # type: () -> None
trans = helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 1, 1])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))])
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_nop_pad_default(self): # type: () -> None
trans = helper.make_node("Pad", ["X", "Pads"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 1, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_eliminate_unused_initializer(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
def test_eliminate_unused_initializer_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
assert len(optimized_model.graph.input) == 2
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used_default(self):
add = helper.make_node("Add", ["X", "A"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used(self):
nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Add", ["_X", "A"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 2), "X")],
[(TensorProto.FLOAT, (1, 2), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
# Add, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Add"
assert optimized_model.graph.output[0].name == "Z"
# Add
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_output(self):
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
assert "Z" in [o.name for o in optimized_model.graph.output]
def test_extract_constant_to_initializer(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(
graph, ["extract_constant_to_initializer"])
self.assertEqual(len(optimized_model.graph.initializer), 1)
init = optimized_model.graph.initializer[0]
self.assertEqual(init.name, 'A')
self.assertEqual(init.dims, [16, 1, 1])
self.assertEqual(init.data_type, TensorProto.FLOAT)
self.assertEqual(
[n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
def test_fuse_concats(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=0),
helper.make_node("Concat", ["X", "G", "Y"], ["Z"], axis=0)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("G", TensorProto.FLOAT, (4, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (22, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"], True) # two passes are needed to simplify the graph to its simplest state.
assert len(optimized_model.graph.node) == 1
assert len(optimized_model.graph.node[0].input) == 7
assert optimized_model.graph.node[0].input == [
"A", "B", "C", "G", "D", "E", "F"]
assert optimized_model.graph.node[0].op_type == "Concat"
def test_fuse_concats_different_axis(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=1),
helper.make_node("Concat", ["X", "Y"], ["Z"], axis=2)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 9, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (8, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (8, 9, 8))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"])
assert optimized_model.graph == graph
def test_fuse_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
[(TensorProto.FLOAT, (2, 3, 4), "X")],
[(TensorProto.FLOAT, (2, 4, 3), "Y4")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
original_model = helper.make_model(graph)
shape_inference.infer_shapes(original_model)
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
shape_inference.infer_shapes(optimized_model)
# Transpose, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
# Transpose
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
def test_fuse_transpose_default_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans1 = helper.make_node("Transpose", ["A"], ["B"])
trans2 = helper.make_node("Transpose", ["B"], ["C"])
graph = helper.make_graph(
[add, trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3))])
# The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_fuse_transpose_default(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 0
def test_fuse_transpose_default_no_fuse(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 2
for node in optimized_model.graph.node:
assert node.op_type == "Transpose"
def test_fuse_transpose_into_gemm(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["_B"], perm=[1, 0]),
helper.make_node("Gemm", ["_A", "_B", "C"], ["_Z2"])],
[(TensorProto.FLOAT, (2, 3), "X")],
[(TensorProto.FLOAT, (3, 5), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
# Gemm, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Gemm"
# Gemm
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
def test_fuse_add_bias_into_conv_with_scalar_bias(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, ())],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# Unsqueeze, Conv
assert len(optimized_model.graph.node) == 4
assert optimized_model.graph.node[0].op_type == 'Unsqueeze'
assert optimized_model.graph.node[1].op_type == 'Constant'
assert optimized_model.graph.node[2].op_type == 'Tile'
assert optimized_model.graph.node[3].op_type == 'Conv'
def test_fuse_add_bias_into_conv_use_weight_shape(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
# FIXME(daquexian): It looks like subgraph cannot get value info from parent subgraph
# nodes.extend(self._make_fake_loop_op(
# [helper.make_node("Conv", ["_X", "Y"], ["_Z"]),
# helper.make_node("Add", ["_Z", "A"], ["_B2"])],
# [(TensorProto.FLOAT, (1, 5, 3, 3), "X")],
# [(TensorProto.FLOAT, (1, 16, 1, 1), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# # Squeeze, Conv, Constant (trip count), Constant (condition), Loop
# assert len(list(optimized_model.graph.node)) == 5
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[1].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
# # Squeeze, Conv
# assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
# assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
# assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
# # Output 1 since 0 is 'cond'
# assert optimized_model.graph.node[4].attribute[0].g.output[1].name == 'B2'
# type: () -> None
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 3
assert len(optimized_model.graph.value_info) == 1
assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
assert len(
optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Tile'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
def test_fuse_add_bias_into_conv_use_conv_shape(self): # type: () -> None
sub = helper.make_node("Sub", ["M", "N"], ["Y"])
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[sub, conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"M", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info(
"N", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))
],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Sub'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_use_move_constant(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
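# fuse_matmul_add_bias_into_gemm folds a 2-D MatMul followed by Add into a
# single Gemm (which computes alpha * A @ B + beta * C). A rough sketch:
#   MatMul(X, Y) -> Z;  Add(Z, B) -> A   ==>   Gemm(X, Y, B) -> A
# The no-fuse tests below cover biases and matmuls that Gemm cannot express.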
def test_fuse_matmul_add_bias_into_gemm(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16,))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias_same_shape(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (32, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias_bcast_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_3d_matmul_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (3, 3))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 3))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_3d_bias_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
# A 3-D bias with a 2-D MatMul is not legal, so disable onnxruntime checking
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"], compare_result=False)
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_multiple_use_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
identity = helper.make_node("Identity", ["Z"], ["A1"])
add = helper.make_node("Add", ["Z", "B"], ["A2"])
graph = helper.make_graph(
[matmul, add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A1", TensorProto.FLOAT, (32, 16)),
helper.make_tensor_value_info("A2", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
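# fuse_pad_into_conv folds a constant-mode, zero-valued Pad on the spatial
# dimensions into the Conv's own "pads" attribute. Sketch for 4-D inputs:
#   Pad(X, pads=[0, 0, hb, wb, 0, 0, he, we]) -> P;  Conv(P, W) -> Z
#     ==>  Conv(X, W, pads=[hb, wb, he, we]) -> Z
# Pads on the batch/channel axes, negative pads, non-zero pad values, and
# non-constant modes block the fusion, as the no-fuse tests below verify.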
def test_fuse_pad_into_conv_no_optional_value_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_no_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_with_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
vals=np.array([0]).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_with_nonzero_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
# non-zero Constant_value -> the pad cannot be fused into Conv
vals=np.array([25]).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_1d_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 1, 0, 0, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_1d(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (6,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(6,),
vals=np.array([0, 0, 1, 0, 0, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_existing_conv_pad_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
def test_fuse_pad_into_conv_existing_conv_pad(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
def test_fuse_pad_into_conv_pad_feature_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 1, 0, 0, 0, 0, 0, 0]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_pad_feature_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 1, 0, 0, 0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, -1, -1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array(
[0, 0, 0, 0, 0, 0, -1, -1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_reflection_pad_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="reflect",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_reflection_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="reflect"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_consecutive_squeezes(self): # type: () -> None
nodes = [helper.make_node("Squeeze", ["X", "X_axes"], ["Y"]),
helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Squeeze", ["_X", "X_axes"], ["_Y"]),
helper.make_node("Squeeze", ["_Y", "Y_axes"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9), "X")],
[(TensorProto.FLOAT, (2, 3, 1, 8, 9), "Z2")]))
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2])],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (2, 3, 1, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
# Squeeze, Constant (trip count), Constant (cond), Loop
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 4
def test_fuse_consecutive_squeezes_default(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
squeeze3 = helper.make_node("Squeeze", ["Z", "Z_axes"], ["A"])
nodes = [squeeze1, squeeze2, squeeze3]
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)),
('Z_axes', np.array([2], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
helper.make_tensor_value_info("Z_axes", TensorProto.INT64, [1])],
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (2, 3, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6, 7]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_random(self): # type: () -> None
x_shape = [1, 1, 1, 3, 4, 1, 6, 1, 1, 9]
s1_one_indices = [i for i, a in enumerate(x_shape) if a == 1]
s1_axes = np.random.choice(s1_one_indices,
size=np.random.randint(
low=1, high=len(s1_one_indices) - 1),
replace=False).astype(np.int64)
s2_x_shape = [a for i, a in enumerate(x_shape) if i not in s1_axes]
s2_one_indices = [i for i, a in enumerate(s2_x_shape) if a == 1]
s2_axes = np.array(s2_one_indices).astype(np.int64)
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', s1_axes),
('Y_axes', s2_axes)]
]
nodes = [squeeze1, squeeze2]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, x_shape),
helper.make_tensor_value_info(
"X_axes", TensorProto.INT64, s1_axes.shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, s2_axes.shape)],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (3, 4, 6, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 2, 5, 7, 8]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_multi_uses(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
add = helper.make_node("Add", ["Y", "A"], ["Z2"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)), ]
]
graph = helper.make_graph(
[squeeze1, add, squeeze2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (1, 2, 3, 1, 1, 8, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert optimized_model.graph.node[2].op_type == "Squeeze"
assert optimized_model.graph.node[2].input[0] == "X"
assert len(list(optimized_model.graph.node)) == 3
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [
0, 4, 5]
if init.name == optimized_model.graph.node[2].input[1]:
assert list(to_array(init)) == [
0, 1, 4, 5, 6]
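# fuse_consecutive_log_softmax rewrites Softmax followed by Log into a
# single LogSoftmax on the same axis:
#   Softmax(X, axis=a) -> Y;  Log(Y) -> Z   ==>   LogSoftmax(X, axis=a) -> Z
# The rewrite must be skipped when the intermediate Y is also a graph
# output, since the fused node would no longer produce it.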
def test_fuse_consecutive_softmax_log_axis(self): # type: () -> None
for axis in range(3):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=axis)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_fuse_consecutive_softmax_log_side_effect(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert graph == optimized_model.graph
def test_fuse_consecutive_softmax_log_multiple_out(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
graph = helper.make_graph(
[softmax, log, exp],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert len(optimized_model.graph.output) == 2
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.output[1].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Exp"
def test_preserve_value_info(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
graph = helper.make_graph(
[trans1, trans2, trans3],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3))])
vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
graph.value_info.extend([vi])
optimized_model = self._optimized(graph, ["nop"])
assert list(optimized_model.graph.value_info) == [vi]
assert len(list(optimized_model.graph.node)) == 3
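# split_init / split_predict partition a graph in two: the init model keeps
# the constant-producing nodes, while the predict model drops them and
# exposes their outputs (here 'X') as graph inputs instead.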
def test_split(self): # type: () -> None
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['X'],
value=onnx.helper.make_tensor(
name='X',
data_type=TensorProto.FLOAT,
dims=[1],
vals=[5],
),
)
graph = helper.make_graph(
[node],
'test-optimize-split',
[],
[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
init_model = self._optimized(graph, ['split_init'])
self.assertEqual(len(init_model.graph.node), 1)
self.assertEqual(len(init_model.graph.output), 1)
self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
predict_model = self._optimized(graph, ['split_predict'])
self.assertEqual(len(predict_model.graph.node), 0)
self.assertEqual(len(predict_model.graph.input), 1)
self.assertEqual(predict_model.graph.input[0].name, 'X')
def test_lift_lex_loop(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
# 'lift_lexical_references' is legacy code and I don't know how it works.
# More errors occur if I make this loop op legal,
# so don't check legality here.
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")],
check_legality=False))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "lift_lexical_references" pass produces a graph that does not conform to
# the ONNX spec. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
assert len(optimized_model.graph.node) == 4
# body_graph, __control_inputs
assert len(optimized_model.graph.node[3].attribute) == 2
assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"
def test_lift_lex_if(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_if_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["X"], ["_Y3"])],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "If" node now diverges from ONNX schema. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
# Identity, Constant (condition), If
assert len(optimized_model.graph.node) == 3
# else_branch, then_branch, __control_inputs
assert len(optimized_model.graph.node[2].attribute) == 3
assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
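# fuse_bn_into_conv folds BatchNormalization into the preceding Conv's
# weight and bias. With f = scale / sqrt(var + eps), the rewrite checked
# numerically below is:
#   W' = W * f   (f broadcast over the output-channel axis)
#   b' = (B - mean) * f + b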
def test_fuse_bn_into_conv_simple(self): # type: () -> None
for (tensor_type, np_type) in [(TensorProto.FLOAT, np.float32)]:
conv = helper.make_node("Conv", ["X", "W", "B"], ["Y"])
bn = helper.make_node("BatchNormalization", [
"Y", "scale", "b", "mean", "var"], ["Z"])
W = np.random.randn(3, 2, 5, 5).astype(np_type) + 2
B = np.random.randn(3,).astype(np_type) + 2
scale = np.random.randn(3,).astype(np_type) + 2
b = np.random.randn(3,).astype(np_type) + 2
mean = np.random.randn(3,).astype(np_type) + 2
var = np.abs(np.random.randn(3,).astype(np_type)) + 2
initializers = [
helper.make_tensor(name, tensor_type,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('W', W), ('B', B), ('scale', scale), ('b', b), ('mean', mean), ('var', var)]
]
graph = helper.make_graph(
[conv, bn],
"test",
[helper.make_tensor_value_info("X", tensor_type, (5, 2, 28, 28))],
[helper.make_tensor_value_info(
"Z", tensor_type, (5, 3, 24, 24))],
initializer=initializers,
value_info=[
helper.make_tensor_value_info(
"Y", tensor_type, (5, 3, 24, 24))
]
)
optimized_model = self._optimized(graph, ["fuse_bn_into_conv"])
self.assertEqual(len(optimized_model.graph.node), 1)
self.assertEqual(optimized_model.graph.node[0].op_type, 'Conv')
self.assertEqual(len(optimized_model.graph.initializer), 2)
new_W = numpy_helper.to_array(optimized_model.graph.initializer[0])
new_b = numpy_helper.to_array(optimized_model.graph.initializer[1])
f = scale / np.sqrt(var + 1e-5)
np.testing.assert_almost_equal((B - mean) * f + b, new_b)
np.testing.assert_almost_equal(
W * f[:, np.newaxis, np.newaxis, np.newaxis], new_W)
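# eliminate_deadend removes nodes whose outputs are neither graph outputs
# nor consumed by other nodes. In the graph below only Softmax -> Log feeds
# the sole output Z, so Exp, the second Log, and Sqrt are all dead ends.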
def _internal_test_deadend_elimination(self, fixed): # type: (bool) -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
exp1 = helper.make_node("Log", ["Z"], ["Z2"])
exp2 = helper.make_node("Sqrt", ["Z1"], ["Z3"])
graph = helper.make_graph(
[softmax, log, exp, exp1, exp2],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_deadend"], fixed)
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "Softmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Log"
def test_deadend_elimination_simple(self): # type: () -> None
self._internal_test_deadend_elimination(False)
def test_deadend_elimination_simple_fixed(self): # type: () -> None
self._internal_test_deadend_elimination(True)
def _get_argmax_output_shape(self, input_shape, axis, keepdims):
assert keepdims
output_shape = list(input_shape[:])
output_shape[axis] = 1
output_shape = tuple(output_shape)
return output_shape
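# eliminate_nop_monotone_argmax drops a monotonically increasing op feeding
# ArgMax, since it cannot change which index holds the maximum:
#   Exp(X) -> Y;  ArgMax(Y, axis=a) -> Z   ==>   ArgMax(X, axis=a) -> Z
# For axis-dependent ops such as Softmax, the op's axis must match the
# ArgMax axis for the monotonicity argument to apply.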
def test_eliminate_nop_monotone_argmax_basic_no_node_axis(self): # type: () -> None
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_eliminate_nop_monotone_argmax_basic_with_node_axis(self): # type: () -> None
input_shape = (5, 7, 11)
for node_name in ["Softmax", "LogSoftmax"]:
for axis_n in range(3):
for axis_max in range(3):
node = helper.make_node(
node_name, ["X"], ["Y"], axis=axis_n)
argmax = helper.make_node(
"ArgMax", ["Y"], ["Z"], axis=axis_max)
output_shape = self._get_argmax_output_shape(
input_shape, axis_max, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
if axis_max == axis_n:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis_max
else:
assert optimized_model.graph == graph
def test_eliminate_nop_monotone_argmax_multiple_out(self): # type: () -> None
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
node2 = helper.make_node(node_name, ["Y"], ["Z1"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
argmax_output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, argmax_output_shape),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, input_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert optimized_model.graph == graph
def test_eliminate_nop_monotone_argmax_consecutive(self): # type: () -> None
input_shape = (5, 7, 11)
def _assertion(graph, optimized_model, axis_aligned, true_axis):
# type: (GraphProto, ModelProto, bool, int) -> None
if axis_aligned:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == true_axis
else:
assert optimized_model.graph == graph
# no axis X no axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"])
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model, True, axis)
# no axis X axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_0)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_1)
output_shape = self._get_argmax_output_shape(
input_shape, axis_1, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model,
axis_0 == axis_1, axis_1)
# axis X axis test
for node_name_0 in ["Softmax", "LogSoftmax"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
for axis_2 in range(3):
node = helper.make_node(
node_name_0, ["X"], ["Y"], axis=axis_0)
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_1)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_2)
output_shape = self._get_argmax_output_shape(
input_shape, axis_2, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
if axis_0 == axis_1: # we can reduce both of the monotonic ops
_assertion(graph, optimized_model,
axis_1 == axis_2, axis_2)
elif axis_1 == axis_2: # we can reduce one of the monotonic ops
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[-1].op_type == "ArgMax"
assert optimized_model.graph.node[-1].attribute[0].name == "axis"
assert optimized_model.graph.node[-1].attribute[0].i == axis_2
else: # we can't reduce anything
assert optimized_model.graph == graph
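# eliminate_nop_dropout removes Dropout nodes that are no-ops at inference:
#   Dropout(X, ratio=0.0) -> Y;  Log(Y) -> Z   ==>   Log(X) -> Z  (opset 11)
# From opset 12 on, ratio moved from an attribute to a runtime input, so
# the pass leaves the node alone, as the first test below checks.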
def test_eliminate_nop_dropout(self): # type: () -> None
node = helper.make_node("Dropout", ["X"], ["Y"])
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False)
# we don't want to eliminate the dropout in opset 12,
# even when it's an optional parameter (defaults to 0)
assert optimized_model.graph == graph
def test_eliminate_nop_dropout_opset11_graph_output(self): # type: () -> None
node = helper.make_node("Log", ["X"], ["Y"])
node1 = helper.make_node("Dropout", ["Y"], ["Z"], ratio=0.0)
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
assert optimized_model.graph.output[0].name == 'Z'
def test_eliminate_nop_dropout_opset11(self): # type: () -> None
for ratio in [0.0, 0.5]:
node = helper.make_node("Dropout", ["X"], ["Y"], ratio=ratio)
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
if ratio > 0.0:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
def test_fuse_reduction_unsqueeze(self): # type: () -> None
def _calculate_post_transform_shape(input_shape, reduction_axes, unsqueeze_axes, keepdim):
# type: (Tuple[int, ...], List[int], List[int], bool) -> Tuple[int, ...]
post_reduce_shape = None
if keepdim:
post_reduce_shape = tuple(
[(x if i not in reduction_axes else 1) for i, x in enumerate(input_shape)])
else:
post_reduce_shape = tuple(
[x for i, x in enumerate(input_shape) if i not in reduction_axes])
post_unsqueeze_shape = list(post_reduce_shape)
for ax in unsqueeze_axes:
post_unsqueeze_shape.insert(ax, 1)
return tuple(post_unsqueeze_shape)
for reduction in ["ReduceL1", "ReduceL2", "ReduceLogSum",
"ReduceLogSumExp", "ReduceMax", "ReduceMean",
"ReduceMin", "ReduceProd", "ReduceSum", "ReduceSumSquare"]:
for axes1 in [[1], [1, 2], [2]]:
for axes2 in [[0], [0, 1], [1]]:
for keepdim in [False, True]:
input_shape = (5, 7, 9)
output_shape = _calculate_post_transform_shape(
input_shape, axes1, axes2, keepdim) # type: Tuple[int, ...]
axes2_arr = np.array(axes2, dtype=np.int64)
graph_input = [helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, axes2_arr.shape)]
graph_initializer = [
helper.make_tensor("Y_axes", TensorProto.INT64,
axes2_arr.shape, axes2_arr.tobytes(), raw=True)
]
if reduction in ("ReduceSum"):
axes1_arr = np.array(axes1, dtype=np.int64)
node = helper.make_node(
reduction, ["X", "X_axes"], ["Y"], keepdims=keepdim)
graph_input.append(
helper.make_tensor_value_info("X_axes", TensorProto.INT64, axes1_arr.shape))
graph_initializer.append(helper.make_tensor("X_axes", TensorProto.INT64,
axes1_arr.shape, axes1_arr.tobytes(), raw=True))
else:
node = helper.make_node(
reduction, ["X"], ["Y"], axes=axes1, keepdims=keepdim)
node1 = helper.make_node(
"Unsqueeze", ["Y", "Y_axes"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
graph_input,
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, output_shape)],
initializer=graph_initializer
)
optimized_model = self._optimized(
graph, ["fuse_consecutive_reduce_unsqueeze"], False)
if keepdim or axes1 != axes2:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[-1].op_type == reduction
if reduction in ("ReduceSum"):
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[-1].input[1]:
assert list(to_array(init)) == axes1
else:
assert optimized_model.graph.node[-1].attribute[0].name == "axes"
assert optimized_model.graph.node[-1].attribute[0].ints == axes1
optimized_output_shape = tuple(
x.dim_value for x in optimized_model.graph.output[0].type.tensor_type.shape.dim)
assert optimized_output_shape == output_shape
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_fasterrcnn_fpn(self): # type: () -> None
model = tv.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# maskrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_maskrcnn_fpn_opset11(self): # type: () -> None
model = tv.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# keypointrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_keypointrcnn_fpn(self): # type: () -> None
model = tv.models.detection.keypointrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_shufflenet_v2(self): # type: () -> None
model = tv.models.shufflenet_v2_x1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_mnasnet(self): # type: () -> None
model = tv.models.mnasnet1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_deeplabv3(self): # type: () -> None
model = tv.models.segmentation.deeplabv3_resnet50(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
if __name__ == '__main__':
unittest.main()