hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b2aa600bd01e785170c4fb17187c92cc9b766691
| 134
|
py
|
Python
|
venv/Lib/site-packages/PyOpenGL-3.0.1/tests/test_glutinit_single.py
|
temelkirci/Motion_Editor
|
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
|
[
"MIT"
] | 1
|
2022-03-02T17:07:20.000Z
|
2022-03-02T17:07:20.000Z
|
venv/Lib/site-packages/PyOpenGL-3.0.1/tests/test_glutinit_single.py
|
temelkirci/RealTime_6DOF_Motion_Editor
|
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyOpenGL-3.0.1/tests/test_glutinit_single.py
|
temelkirci/RealTime_6DOF_Motion_Editor
|
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
|
[
"MIT"
] | null | null | null |
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
glutInit([''])
glutInitDisplayMode (GLUT_SINGLE | GLUT_RGB)
| 26.8
| 44
| 0.768657
| 18
| 134
| 5.611111
| 0.555556
| 0.29703
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 134
| 5
| 44
| 26.8
| 0.855932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a2382cf8f9e4763edcfde7e3ef793f016c0e9c10
| 45
|
py
|
Python
|
dnetwork/__init__.py
|
heroddaji/SurpriseDeep
|
e5860167fdd1442a32afcb97aa3c6f0c6365b01b
|
[
"BSD-3-Clause"
] | 7
|
2018-12-11T18:14:05.000Z
|
2020-02-29T05:09:47.000Z
|
dnetwork/__init__.py
|
heroddaji/SurpriseDeep
|
e5860167fdd1442a32afcb97aa3c6f0c6365b01b
|
[
"BSD-3-Clause"
] | 1
|
2019-02-14T15:52:18.000Z
|
2019-02-14T15:52:18.000Z
|
dnetwork/__init__.py
|
heroddaji/SurpriseDeep
|
e5860167fdd1442a32afcb97aa3c6f0c6365b01b
|
[
"BSD-3-Clause"
] | null | null | null |
from .graph import *
from .hetegraph import *
| 22.5
| 24
| 0.755556
| 6
| 45
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 24
| 22.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a26c97b112da906a93b6c45e38e745dcbf3946f8
| 227
|
py
|
Python
|
itables/interactive.py
|
jpbarrette/itables
|
87a56140902f02be4f68db5974bd53674f958118
|
[
"MIT"
] | 1
|
2020-08-20T07:32:44.000Z
|
2020-08-20T07:32:44.000Z
|
itables/interactive.py
|
andrewreece/itables
|
2617bed0829ae8b52ea4543cbddcfc77a0ac663a
|
[
"MIT"
] | null | null | null |
itables/interactive.py
|
andrewreece/itables
|
2617bed0829ae8b52ea4543cbddcfc77a0ac663a
|
[
"MIT"
] | null | null | null |
"""Activate the representation of Pandas dataframes as interactive tables"""
import pandas as pd
from .javascript import _datatables_repr_
pd.DataFrame._repr_html_ = _datatables_repr_
pd.Series._repr_html_ = _datatables_repr_
| 32.428571
| 76
| 0.837004
| 30
| 227
| 5.833333
| 0.6
| 0.24
| 0.182857
| 0.251429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105727
| 227
| 6
| 77
| 37.833333
| 0.862069
| 0.30837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a288c6577950a391528591f3bf1f7e0a24338630
| 1,496
|
py
|
Python
|
tests/test_cli/test_source.py
|
sobolevn/dump_env
|
1c1d44613b94bbe67a0e60e101f214f891b9e752
|
[
"MIT"
] | 57
|
2018-04-27T20:13:01.000Z
|
2020-11-18T01:04:52.000Z
|
tests/test_cli/test_source.py
|
sobolevn/dump_env
|
1c1d44613b94bbe67a0e60e101f214f891b9e752
|
[
"MIT"
] | 145
|
2018-01-15T11:06:08.000Z
|
2020-11-20T02:15:49.000Z
|
tests/test_cli/test_source.py
|
sobolevn/dump_env
|
1c1d44613b94bbe67a0e60e101f214f891b9e752
|
[
"MIT"
] | 8
|
2018-02-05T20:54:03.000Z
|
2020-07-28T11:39:17.000Z
|
import delegator
def test_source_vars(monkeypatch, env_file):
"""Check that cli shows only source variables."""
monkeypatch.setenv('NORMAL_KEY', '1')
monkeypatch.setenv('EXTRA_VALUE', '2')
variables = delegator.run('dump-env -s {0}'.format(env_file))
assert variables.out == 'NORMAL_KEY=1\n'
assert variables.subprocess.returncode == 0
def test_source_prefixes(monkeypatch, env_file):
"""Check that cli allows prefixes with source."""
monkeypatch.setenv('NORMAL_KEY', '1')
monkeypatch.setenv('EXTRA_VALUE', '2')
variables = delegator.run('dump-env -p EXTRA_ -s {0}'.format(env_file))
assert variables.out == 'NORMAL_KEY=1\nVALUE=2\n'
assert variables.subprocess.returncode == 0
def test_source_strict(monkeypatch, env_file):
"""Check that cli works correctly with strict-source."""
monkeypatch.setenv('NORMAL_KEY', '1')
monkeypatch.setenv('EXTRA_VALUE', '2')
variables = delegator.run(
'dump-env --strict-source -s {0}'.format(env_file),
)
assert variables.out == 'NORMAL_KEY=1\n'
assert variables.subprocess.returncode == 0
def test_source_strict_fail(monkeypatch, env_file):
"""Check that cli works correctly with strict-source missing keys."""
monkeypatch.setenv('EXTRA_VALUE', '2')
variables = delegator.run(
'dump-env --strict-source -s {0}'.format(env_file),
)
assert variables.err == 'Missing env vars: NORMAL_KEY\n'
assert variables.subprocess.returncode == 1
| 33.244444
| 75
| 0.69385
| 196
| 1,496
| 5.147959
| 0.22449
| 0.055501
| 0.059465
| 0.091179
| 0.864222
| 0.828543
| 0.769078
| 0.769078
| 0.769078
| 0.769078
| 0
| 0.015237
| 0.166444
| 1,496
| 44
| 76
| 34
| 0.793905
| 0.135027
| 0
| 0.571429
| 0
| 0
| 0.207384
| 0.018068
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.142857
| false
| 0
| 0.035714
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2907eaa90268d65169cf1531c74cdc6abeba6d3
| 1,383
|
py
|
Python
|
tests/exploits/exp2.py
|
ecavicc/flagWarehouse
|
7298356d124a4fd9a3cfa50a5407140dde6b7dc5
|
[
"MIT"
] | 6
|
2021-01-08T23:56:50.000Z
|
2022-02-23T01:59:33.000Z
|
tests/exploits/exp2.py
|
ecavicc/flagWarehouse
|
7298356d124a4fd9a3cfa50a5407140dde6b7dc5
|
[
"MIT"
] | null | null | null |
tests/exploits/exp2.py
|
ecavicc/flagWarehouse
|
7298356d124a4fd9a3cfa50a5407140dde6b7dc5
|
[
"MIT"
] | 1
|
2022-01-04T02:41:25.000Z
|
2022-01-04T02:41:25.000Z
|
#!/usr/bin/env python3
from random import choice
from string import ascii_uppercase, digits
from sys import argv
from time import sleep
ip = argv[0]
def rand_flag(length=32):
alphabet = ascii_uppercase + digits
return ' ' + ''.join((choice(alphabet) for i in range(length-1))) + '= '
sleep(0.5)
print('eownfoawenfoviaowedm4ivu39q384uv8m4u3q30vr90q'
+ rand_flag()
+ '\newajnfonwbu439h0q239jt0834h9t8h384hhfn9wf3w0ÉÉç*çF*WéL'
+ rand_flag()
+ '\nerragarbrçFAAAAAAAADWFEWEeargg'
+ rand_flag()
+ '\nerragarbrçFAAAAAAAADWFEWEearggweq4wgq3rg5q564g'
+ rand_flag()
+ '\nertgergRGgrerag89898H888YGB8YBH79j0NIOààà°°°'
+ rand_flag()
+ '\nvreoijgi0reg r0iegreig** fmieofw ewfmwoe8u9009434567£$%&'
+ rand_flag()
+ '\npwefwofekwoiuy9yy87TG/&R/g8b7y)(Ujjm0909000'
)
sleep(2)
print('eownfoawenfoviaowedm4ivu39q384uv8m4u3q30vr90q'
+ rand_flag()
+ '\newajnfonwbu439h0q239jt0834h9t8h384hhfn9wf3w0ÉÉç*çF*WéL'
+ rand_flag()
+ '\nerragarbrçFAAAAAAAADWFEWEeargg'
+ rand_flag()
+ '\nerragarbrçFAAAAAAAADWFEWEearggweq4wgq3rg5q564g'
+ rand_flag()
+ '\nertgergRGgrerag89898H888YGB8YBH79j0NIOààà°°°'
+ rand_flag()
+ '\nvreoijgi0reg r0iegreig** fmieofw ewfmwoe8u9009434567£$%&'
+ rand_flag()
+ '\npwefwofekwoiuy9yy87TG/&R/g8b7y)(Ujjm0909000'
)
| 30.733333
| 76
| 0.686913
| 109
| 1,383
| 8.651376
| 0.46789
| 0.110286
| 0.042418
| 0.123012
| 0.767762
| 0.767762
| 0.767762
| 0.767762
| 0.767762
| 0.767762
| 0
| 0.141959
| 0.195228
| 1,383
| 44
| 77
| 31.431818
| 0.698113
| 0.015184
| 0
| 0.684211
| 0
| 0
| 0.487142
| 0.433505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.105263
| 0
| 0.157895
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2e5f261616d1b97781ce0eaca6c1ef48af5a03e
| 43
|
py
|
Python
|
pystiche/papers/common_utils/__init__.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/papers/common_utils/__init__.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/papers/common_utils/__init__.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
from .misc import *
from .modules import *
| 14.333333
| 22
| 0.72093
| 6
| 43
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 23
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2e8eec2dbcb4cd7f94d0563f37d56db6831b122
| 22
|
py
|
Python
|
data/services/cv19srv/cv19srv/__init__.py
|
TISTATechnologies/cv19
|
5200d20d51ee9e0f4f8cc6f0af0267a3670398ed
|
[
"Apache-2.0"
] | 2
|
2020-10-20T12:05:16.000Z
|
2021-09-21T13:10:17.000Z
|
data/services/cv19srv/cv19srv/__init__.py
|
TISTATechnologies/cv19
|
5200d20d51ee9e0f4f8cc6f0af0267a3670398ed
|
[
"Apache-2.0"
] | 10
|
2020-07-01T16:40:39.000Z
|
2022-01-19T21:37:47.000Z
|
data/services/cv19srv/cv19srv/__init__.py
|
TISTATechnologies/cv19
|
5200d20d51ee9e0f4f8cc6f0af0267a3670398ed
|
[
"Apache-2.0"
] | 1
|
2021-08-09T13:53:50.000Z
|
2021-08-09T13:53:50.000Z
|
from . import cv19srv
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.181818
| 22
| 1
| 22
| 22
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c06a7fd7c0c080329c087bdab4707b25ffa351a
| 390
|
py
|
Python
|
src/spaceone/inventory/manager/__init__.py
|
spaceone-dev/plugin-azure-state-inven-collector
|
7184ae09b85042737c9db371dacd586d23abde21
|
[
"Apache-2.0"
] | 1
|
2020-12-04T01:37:15.000Z
|
2020-12-04T01:37:15.000Z
|
src/spaceone/inventory/manager/__init__.py
|
spaceone-dev/plugin-azure-state-inven-collector
|
7184ae09b85042737c9db371dacd586d23abde21
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/manager/__init__.py
|
spaceone-dev/plugin-azure-state-inven-collector
|
7184ae09b85042737c9db371dacd586d23abde21
|
[
"Apache-2.0"
] | 2
|
2020-12-04T01:37:18.000Z
|
2020-12-28T02:53:39.000Z
|
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.manager.virtual_machine_manager import VirtualMachineManager
from spaceone.inventory.manager.virtual_machine_scale_set_manager import VmScaleSetManager
from spaceone.inventory.manager.subscription_manager import SubscriptionManager
from spaceone.inventory.manager.sql_server_manager import SqlServerManager
| 48.75
| 90
| 0.905128
| 44
| 390
| 7.818182
| 0.409091
| 0.174419
| 0.305233
| 0.325581
| 0.244186
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05641
| 390
| 7
| 91
| 55.714286
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c0b9b384405948e1e479fa360e1573294ae2f04
| 29
|
py
|
Python
|
multiaug/augmenters/image3d/__init__.py
|
Devin-Taylor/MultiAug
|
eca83192a54ffe3362bf90c4181bac1a68481ee5
|
[
"MIT"
] | 17
|
2019-05-08T14:52:32.000Z
|
2022-03-30T01:36:26.000Z
|
multiaug/augmenters/image3d/__init__.py
|
Devin-Taylor/MultiAug
|
eca83192a54ffe3362bf90c4181bac1a68481ee5
|
[
"MIT"
] | null | null | null |
multiaug/augmenters/image3d/__init__.py
|
Devin-Taylor/MultiAug
|
eca83192a54ffe3362bf90c4181bac1a68481ee5
|
[
"MIT"
] | null | null | null |
from .rotate import Rotate3d
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c437eda10e7233e07c659f3a5c53d97ddedc6d9
| 6,069
|
py
|
Python
|
FinalProject/models.py
|
oghahroodi/Active-Learning-in-Neural-Networks
|
a8aac5c9834c538bbc5cc5eeb41afde3b8a043db
|
[
"MIT"
] | null | null | null |
FinalProject/models.py
|
oghahroodi/Active-Learning-in-Neural-Networks
|
a8aac5c9834c538bbc5cc5eeb41afde3b8a043db
|
[
"MIT"
] | null | null | null |
FinalProject/models.py
|
oghahroodi/Active-Learning-in-Neural-Networks
|
a8aac5c9834c538bbc5cc5eeb41afde3b8a043db
|
[
"MIT"
] | 1
|
2021-12-03T17:53:38.000Z
|
2021-12-03T17:53:38.000Z
|
from init import *
def get_discriminative_model(input_shape):
if np.sum(input_shape) < 30:
width = 20
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(width, activation='relu'))
model.add(Dense(width, activation='relu'))
model.add(Dense(width, activation='relu'))
model.add(Dense(2, activation='softmax', name='softmax'))
else:
width = 256
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(width, activation='relu'))
model.add(Dense(width, activation='relu'))
model.add(Dense(width, activation='relu'))
model.add(Dense(2, activation='softmax', name='softmax'))
return model
def LeNet(input_shape, labels=10):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', name='embedding'))
model.add(Dropout(0.5))
model.add(Dense(labels, activation='softmax', name='softmax'))
return model
def VGG(input_shape, labels=10):
weight_decay = 0.0005
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=input_shape, kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',
kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512, kernel_regularizer=regularizers.l2(
weight_decay), name='embedding'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(labels, activation='softmax', name='softmax'))
return model
def MobileNet_pretrain(input_shape, labels=10):
base_model = MobileNet(
weights='imagenet', include_top=False, input_shape=input_shape)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dense(1024, activation='relu')(x) # dense layer 2
x = Dense(512, activation='relu')(x) # dense layer 3
preds = Dense(labels, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=preds)
return model
def get_autoencoder_model(input_shape, labels=10):
image = Input(shape=input_shape)
encoder = Conv2D(32, (3, 3), activation='relu', padding='same')(image)
encoder = MaxPooling2D((2, 2), padding='same')(encoder)
encoder = Conv2D(8, (3, 3), activation='relu', padding='same')(encoder)
encoder = Conv2D(4, (3, 3), activation='relu', padding='same')(encoder)
encoder = MaxPooling2D((2, 2), padding='same')(encoder)
decoder = UpSampling2D((2, 2), name='embedding')(encoder)
decoder = Conv2D(4, (3, 3), activation='relu', padding='same')(decoder)
decoder = Conv2D(8, (3, 3), activation='relu', padding='same')(decoder)
decoder = UpSampling2D((2, 2))(decoder)
decoder = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder)
decoder = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoder)
autoencoder = Model(image, decoder)
return autoencoder
| 38.656051
| 96
| 0.646565
| 747
| 6,069
| 5.170013
| 0.105756
| 0.159503
| 0.103314
| 0.119627
| 0.817711
| 0.796996
| 0.763335
| 0.723718
| 0.663387
| 0.629208
| 0
| 0.046148
| 0.189488
| 6,069
| 156
| 97
| 38.903846
| 0.738971
| 0.004449
| 0
| 0.721805
| 0
| 0
| 0.053155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037594
| false
| 0
| 0.007519
| 0
| 0.082707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a74b73405bffbf599ea18176a2dca30f73c4d011
| 334
|
py
|
Python
|
src/alphaorm/utilities/constants.py
|
Losintech/python-alpha-orm
|
01e88c5cf21b881dc670d605b353df8ae52eb83c
|
[
"MIT"
] | 1
|
2019-12-06T05:18:38.000Z
|
2019-12-06T05:18:38.000Z
|
src/alphaorm/utilities/constants.py
|
Losintech/python-alpha-orm
|
01e88c5cf21b881dc670d605b353df8ae52eb83c
|
[
"MIT"
] | null | null | null |
src/alphaorm/utilities/constants.py
|
Losintech/python-alpha-orm
|
01e88c5cf21b881dc670d605b353df8ae52eb83c
|
[
"MIT"
] | null | null | null |
UNDERSCORE_NOT_SUPORRTED_ERROR = 'Column names cannot contain `_` symbol'
SPACE_NOT_SUPORRTED_ERROR = 'Column names should not have a space'
def SETUP_PARAMETER_MISSING(paremeter):
return f"The '{paremeter}' is required!"
def DATA_TYPE_ERROR(method):
return f"Parameter passed into method `{method}` must be of type `AlphaRecord`"
| 41.75
| 80
| 0.793413
| 48
| 334
| 5.291667
| 0.666667
| 0.094488
| 0.133858
| 0.181102
| 0.220472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11976
| 334
| 8
| 80
| 41.75
| 0.863946
| 0
| 0
| 0
| 0
| 0
| 0.516418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
a7b5cf83ec9be9727318993dc8b2e6e9c96ac9b5
| 34,517
|
py
|
Python
|
Fastir_Collector/health/statemachine.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 4
|
2021-04-23T15:39:17.000Z
|
2021-12-27T22:53:24.000Z
|
Fastir_Collector/health/statemachine.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | null | null | null |
Fastir_Collector/health/statemachine.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 2
|
2021-04-19T08:28:54.000Z
|
2022-01-19T13:23:29.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import subprocess
import traceback
import psutil
from settings import NETWORK_ADAPTATER
from utils.utils import write_to_output, get_csv_writer, write_to_json, close_json_writer, get_json_writer,\
write_list_to_json, write_to_csv, get_terminal_decoded_string, record_sha256_logs, process_md5, process_sha1
import win32process
import re
import wmi
import datetime
class _Statemachine(object):
def __init__(self, params):
self.params = params
self.wmi = wmi.WMI()
self.computer_name = params['computer_name']
self.output_dir = params['output_dir']
self.systemroot = params['system_root']
self.logger = params['logger']
self.rand_ext = params['rand_ext']
if 'destination' in params:
self.destination = params['destination']
def _list_network_drives(self):
for disk in self.wmi.Win32_LogicalDisk(DriveType=4):
yield disk.Caption, disk.FileSystem, disk.ProviderName
def _list_drives(self):
for physical_disk in self.wmi.Win32_DiskDrive():
for partition in physical_disk.associators("Win32_DiskDriveToDiskPartition"):
for logical_disk in partition.associators("Win32_LogicalDiskToPartition"):
yield physical_disk.Caption, partition.Caption, logical_disk.Caption, logical_disk.FileSystem
def _list_share(self):
for share in self.wmi.Win32_Share():
yield share.Name, share.Path
def _list_running(self):
for process in self.wmi.Win32_Process():
yield [process.ProcessId, process.Name, process.CommandLine, process.ExecutablePath]
def _list_sessions(self):
for session in self.wmi.Win32_Session():
yield session.LogonId, session.AuthenticationPackage, session.StartTime, session.LogonType
def _list_scheduled_jobs(self):
proc = subprocess.Popen(["schtasks.exe", '/query', '/fo', 'CSV'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
res = proc.communicate()
res = get_terminal_decoded_string(res[0])
column_names = None
for line in res.splitlines():
if line == "":
continue
if line[0] != '"':
continue
if column_names is None:
column_names = line
continue
elif column_names == line:
continue
yield line
def _list_at_scheduled_jobs(self):
proc = subprocess.Popen('at', stdout=subprocess.PIPE)
res = proc.communicate()
res = get_terminal_decoded_string(res[0])
for line in res.splitlines()[1:]:
line = re.compile(' {2,}').split(line, 4)
if len(line) is 5:
yield line
def _list_network_adapters(self):
net = self.wmi.Win32_NetworkAdapter()
for n in net:
netcard = n.Caption
IPv4 = ''
IPv6 = ''
DHCP_server = ''
DNS_server = ''
adapter_type = ''
nbtstat_value = ''
if n.AdapterTypeID:
adapter_type = NETWORK_ADAPTATER[int(n.AdapterTypeID)]
net_enabled = n.NetEnabled
mac_address = n.MACAddress
description = n.Description
physical_adapter = unicode(n.PhysicalAdapter)
product_name = n.ProductName
speed = n.Speed
database_path = ''
if net_enabled:
nic = self.wmi.Win32_NetworkAdapterConfiguration(MACAddress=mac_address)
for nc in nic:
database_path = nc.DatabasePath
if nc.IPAddress:
try:
IPv4 = nc.IPAddress[0]
IPv6 = nc.IPAddress[1]
except IndexError:
self.logger.error('Error to catch IP Address %s ' % str(nc.IPAddress))
if IPv4:
nbtstat = 'nbtstat -A ' + IPv4
p = subprocess.Popen(nbtstat, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
# output=utils.decode_output_cmd(output)
output = get_terminal_decoded_string(output)
nbtstat_value = output.split('\r\n')
nbtstat_value = ' '.join([n.replace('\n', '') for n in nbtstat_value])
if nc.DNSServerSearchOrder:
DNS_server = nc.DNSServerSearchOrder[0]
if nc.DHCPEnabled:
if nc.DHCPServer:
DHCP_server = nc.DHCPServer
yield netcard, adapter_type, description, mac_address, product_name, physical_adapter, product_name, speed,\
IPv4, IPv6, DHCP_server, DNS_server, database_path, nbtstat_value
def _list_arp_table(self):
cmd = "arp -a"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
output = get_terminal_decoded_string(output)
item = output.split("\n")
for i in item:
yield i
def _list_route_table(self):
route_table = self.wmi.Win32_IP4RouteTable()
for r in route_table:
yield r.Name, r.Mask
def _list_sockets_network(self):
for pid in win32process.EnumProcesses():
try:
p = psutil.Process(pid)
local_addr = ''
local_port = ''
remote_addr = ''
remote_port = ''
for connection in p.connections():
if len(connection.laddr) > 0:
local_addr = connection.laddr[0]
local_port = connection.laddr[1]
if len(connection.raddr) > 0:
remote_addr = connection.raddr[0]
remote_port = connection.raddr[1]
yield pid, p.name(), local_addr, local_port, remote_addr, remote_port, connection.status
except psutil.AccessDenied:
self.logger.warning(traceback.format_exc())
def _list_services(self):
services = self.wmi.Win32_Service()
for s in services:
yield s.Name, s.Caption, s.ProcessId, s.PathName, s.ServiceType, s.Status, s.State, s.StartMode
def _list_kb(self):
for kb in self.wmi.Win32_QuickFixEngineering():
yield kb.Caption, kb.CSName, kb.FixComments, kb.HotFixID, kb.InstallDate, kb.InstalledOn, kb.Name, \
kb.ServicePackInEffect, kb.Status
def _csv_list_running_process(self, list_running):
self.logger.info("Health : Listing running processes")
with open(self.output_dir + '%s_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "COMMAND", "EXEC_PATH"], csv_writer)
for p in list_running:
pid = p[0]
name = p[1]
cmd = p[2]
exe_path = p[3]
write_to_csv(
[self.computer_name, 'processes', unicode(pid), name, unicode(cmd), unicode(exe_path)],
csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_running_process(self, list_running):
self.logger.info("Health : Listing running processes")
if self.destination == 'local':
with open(self.output_dir + '%s_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "COMMAND", "EXEC_PATH"]]
to_write += [[self.computer_name, 'processes', unicode(p[0]), p[1], unicode(p[2]), unicode(p[3])]
for p in list_running]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_hash_running_process(self, list_running):
    """Hash the on-disk executable of each running process and append one CSV
    row per process to <computer>_hash_processes, then record its SHA-256.

    list_running: iterable of (pid, name, cmd, exe_path) tuples. Entries whose
    exe_path is empty or not an existing file are silently skipped.
    """
    self.logger.info("Health : Hashing running processes")
    with open(self.output_dir + '%s_hash_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "EXEC_PATH", "MD5", "SHA1", "CTIME", "MTIME",
                      "ATIME"], csv_writer)
        for p in list_running:
            pid = p[0]
            name = p[1]
            # cmd = p[2]
            exe_path = p[3]
            if exe_path and os.path.isfile(exe_path):
                # Timestamps of the executable file, converted to local time.
                ctime = datetime.datetime.fromtimestamp(os.path.getctime(exe_path))
                mtime = datetime.datetime.fromtimestamp(os.path.getmtime(exe_path))
                atime = datetime.datetime.fromtimestamp(os.path.getatime(exe_path))
                md5 = process_md5(unicode(exe_path))
                sha1 = process_sha1(unicode(exe_path))
                write_to_csv(
                    [self.computer_name, 'hash processes', unicode(pid), name, unicode(exe_path), md5, sha1, ctime,
                     mtime, atime], csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_hash_processes' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_hash_running_process(self, list_running):
    """Hash the on-disk executable of each running process and write the result
    to <computer>_hash_processes JSON, then record its SHA-256. Only runs for
    the 'local' destination.

    list_running: iterable of (pid, name, cmd, exe_path) tuples. Entries whose
    exe_path is empty or not an existing file are silently skipped.
    """
    self.logger.info("Health : Hashing running processes")
    if self.destination == 'local':
        with open(self.output_dir + '%s_hash_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
            json_writer = get_json_writer(fw)
            # First row is the header; data rows are appended below.
            to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "EXEC_PATH", "MD5", "SHA1", "CTIME",
                         "MTIME", "ATIME"]]
            for p in list_running:
                pid = p[0]
                name = p[1]
                # cmd = p[2]
                exe_path = p[3]
                if exe_path and os.path.isfile(exe_path):
                    # Timestamps of the executable file, converted to local time.
                    ctime = datetime.datetime.fromtimestamp(os.path.getctime(exe_path))
                    mtime = datetime.datetime.fromtimestamp(os.path.getmtime(exe_path))
                    atime = datetime.datetime.fromtimestamp(os.path.getatime(exe_path))
                    md5 = process_md5(unicode(exe_path))
                    sha1 = process_sha1(unicode(exe_path))
                    to_write += [[self.computer_name, 'hash processes', unicode(pid), name, unicode(exe_path), md5,
                                  sha1, ctime, mtime, atime]]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_hash_processes' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_share(self, share):
    """Dump the share list to the <computer>_shares CSV and record its SHA-256.

    share: iterable of (name, path) pairs.
    """
    self.logger.info("Health : Listing shares")
    out_path = self.output_dir + '%s_shares' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "SHARE_NAME", "SHARE_PATH"], writer)
        for share_name, share_path in share:
            write_to_csv([self.computer_name, 'shares', share_name, share_path], writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_shares' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_share(self, share):
    """Dump the share list to the <computer>_shares JSON file and record its
    SHA-256. Only runs for the 'local' destination.

    share: iterable of (name, path) pairs.
    """
    self.logger.info("Health : Listing shares")
    if self.destination != 'local':
        return
    out_path = self.output_dir + '%s_shares' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_json_writer(fw)
        rows = [["COMPUTER_NAME", "TYPE", "SHARE_NAME", "SHARE_PATH"]]
        rows += [[self.computer_name, 'shares', share_name, share_path]
                 for share_name, share_path in share]
        write_list_to_json(rows, writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_shares' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_drives(self, drives):
    """Dump physical/partition/logical drive info to the <computer>_list_drives
    CSV and record its SHA-256.

    drives: iterable of (physical caption, partition caption, logical caption,
    filesystem) tuples.
    """
    self.logger.info("Health : Listing drives")
    out_path = self.output_dir + '%s_list_drives' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "FAB", "PARTITIONS", "DISK", "FILESYSTEM"], writer)
        for physical, partition, logical, filesystem in drives:
            write_to_csv([self.computer_name, 'list_drives', physical, partition, logical, filesystem], writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_list_drives' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_drives(self, drives):
    """Dump physical/partition/logical drive info to the <computer>_list_drives
    JSON file and record its SHA-256. Only runs for the 'local' destination.

    drives: iterable of (physical caption, partition caption, logical caption,
    filesystem) tuples.
    """
    self.logger.info("Health : Listing drives")
    if self.destination != 'local':
        return
    out_path = self.output_dir + '%s_list_drives' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_json_writer(fw)
        rows = [["COMPUTER_NAME", "TYPE", "FAB", "PARTITIONS", "DISK", "FILESYSTEM"]]
        rows += [[self.computer_name, 'list_drives', physical, partition, logical, filesystem]
                 for physical, partition, logical, filesystem in drives]
        write_list_to_json(rows, writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_list_drives' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_network_drives(self, drives):
    """Dump mapped network drives to the <computer>_list_networks_drives CSV
    and record its SHA-256.

    drives: iterable of (disk caption, filesystem, provider/partition name).
    """
    self.logger.info("Health : Listing network drives")
    out_path = self.output_dir + '%s_list_networks_drives' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "DISK", "FILESYSTEM", "PARTITION_NAME"], writer)
        for caption, filesystem, provider in drives:
            write_to_csv([self.computer_name, 'list_networks_drives', caption, filesystem, provider], writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_list_networks_drives' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_network_drives(self, drives):
    """Dump mapped network drives to the <computer>_list_networks_drives JSON
    file and record its SHA-256. Only runs for the 'local' destination.

    drives: iterable of (disk caption, filesystem, provider/partition name).
    """
    self.logger.info("Health : Listing network drives")
    if self.destination != 'local':
        return
    out_path = self.output_dir + '%s_list_networks_drives' % self.computer_name + self.rand_ext
    with open(out_path, 'wb') as fw:
        writer = get_json_writer(fw)
        rows = [["COMPUTER_NAME", "TYPE", "DISK", "FILESYSTEM", "PARTITION_NAME"]]
        rows += [[self.computer_name, 'list_networks_drives', caption, filesystem, provider]
                 for caption, filesystem, provider in drives]
        write_list_to_json(rows, writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_list_networks_drives' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_sessions(self, sessions):
    """Append logon sessions to the <computer>_sessions CSV and record its
    SHA-256.

    sessions: iterable of (logon id, authentication package, start time string,
    logon type) tuples.
    """
    self.logger.info('Health : Listing sessions')
    with open(self.output_dir + '%s_sessions' % self.computer_name + self.rand_ext, 'ab') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "LOGON_ID", "AUTH_PACKAGE", "START_TIME", "LOGON_TYPE"], csv_writer)
        for logonID, authenticationPackage, startime, logontype in sessions:
            # split('.')[0] drops the trailing fractional-seconds part of the
            # start-time string.
            write_to_csv([self.computer_name, 'sessions', unicode(logonID),
                          authenticationPackage, unicode(startime.split('.')[0]), unicode(logontype)], csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_sessions' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_sessions(self, sessions):
    """Write logon sessions to the <computer>_sessions JSON file and record its
    SHA-256. Only runs for the 'local' destination.

    sessions: iterable of (logon id, authentication package, start time string,
    logon type) tuples.
    """
    self.logger.info('Health : Listing sessions')
    if self.destination == 'local':
        with open(self.output_dir + '%s_sessions' % self.computer_name + self.rand_ext, 'ab') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "LOGON_ID", "AUTH_PACKAGE", "START_TIME", "LOGON_TYPE"]]
            # split('.')[0] drops the trailing fractional-seconds part of the
            # start-time string.
            to_write += [[self.computer_name, 'sessions', unicode(logonID), authenticationPackage,
                          unicode(startime.split('.')[0]), unicode(logontype)]
                         for logonID, authenticationPackage, startime, logontype in sessions]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_sessions' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_scheduled_jobs(self, is_at_available=False):
    """Write scheduled tasks (and optionally legacy AT jobs) to the
    <computer>_scheduled_jobs CSV and record its SHA-256.

    is_at_available: also include jobs from the legacy AT scheduler when True.
    """
    self.logger.info('Health : Listing scheduled jobs')
    file_tasks = self.output_dir + '%s_scheduled_jobs' % self.computer_name + self.rand_ext
    with open(file_tasks, 'wb') as tasks_logs:
        # Header is written raw (not via the CSV writer), pipe/quote formatted.
        write_to_output('"COMPUTER_NAME","TYPE","TASK_NAME","NEXT_SCHEDULE","STATUS"\r\n', tasks_logs, self.logger)
        csv_writer = get_csv_writer(tasks_logs)
        for line in self._list_scheduled_jobs():
            # Each line is a quoted, comma-separated record; strip quotes and split.
            write_to_csv([self.computer_name, 'scheduled_jobs'] + line.replace('"', '').split(','), csv_writer)
        if is_at_available:
            for line in self._list_at_scheduled_jobs():
                # AT output fields: status at [0], date at [2], time at [3], name at [4].
                write_to_csv([self.computer_name, 'scheduled_jobs', line[4], line[2] + ' ' + line[3], line[0]],
                             csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_scheduled_jobs' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_scheduled_jobs(self, is_at_available=False):
    """Write scheduled tasks (and optionally legacy AT jobs) to the
    <computer>_scheduled_jobs JSON file and record its SHA-256. Only runs
    for the 'local' destination.

    is_at_available: also include jobs from the legacy AT scheduler when True.
    """
    self.logger.info('Health : Listing scheduled jobs')
    if self.destination == 'local':
        file_tasks = self.output_dir + '%s_scheduled_jobs' % self.computer_name + self.rand_ext
        with open(file_tasks, 'wb') as tasks_logs:
            json_writer = get_json_writer(tasks_logs)
            header = ["COMPUTER_NAME", "TYPE", 'TASK_NAME', 'NEXT_SCHEDULE', "STATUS"]
            for line in self._list_scheduled_jobs():
                # CONSISTENCY FIX: was 'Scheduled Jobs'; every other writer in
                # this class (including the AT branch below and the CSV twin of
                # this method) labels the TYPE column 'scheduled_jobs'.
                write_to_json(header, [self.computer_name, 'scheduled_jobs'] + line.replace('"', '').split(','),
                              json_writer)
            if is_at_available:
                for line in self._list_at_scheduled_jobs():
                    # AT output fields: status at [0], date at [2], time at [3], name at [4].
                    write_to_json(header,
                                  [self.computer_name, 'scheduled_jobs', line[4], line[2] + ' ' + line[3], line[0]],
                                  json_writer)
            close_json_writer(json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_scheduled_jobs' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_network_adapters(self, ncs):
    """Write one CSV row per network adapter to <computer>_networks_cards and
    record the file's SHA-256. None fields are replaced with a single space.

    ncs: iterable of 14-field adapter tuples (see unpacking below).
    """
    self.logger.info('Health : Listing network adapters')
    with open(self.output_dir + '%s_networks_cards' % self.computer_name + self.rand_ext, 'wb') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "NETWORK_CARD", "ADAPTER_TYPE", "DESCRIPTION", "MAC_ADDR",
                      "PRODUCT_NAME", "PHYSICAL_ADAPTER", "SPEED", "IPv4", "IPv6", "DHCP_SERVER", "DNS_SERVER",
                      "DATABASE_PATH", "NBTSTAT_VALUE"], csv_writer)
        for nc in ncs:
            # Normalise missing WMI values to a single space, replacing the
            # original 13-branch `if x is None` chain (same observable output).
            (netcard, adapter_type, description, mac_address, _dropped_product_name,
             physical_adapter, product_name, speed, IPv4, IPv6, DHCP_server,
             DNS_server, database_path, nbtstat_value) = [
                ' ' if field is None else field for field in nc]
            # NOTE(review): the original unpacked 'product_name' twice, so field 5
            # of each tuple was silently discarded and field 7 was written as
            # PRODUCT_NAME. Behaviour preserved here (field 5 bound to
            # _dropped_product_name) -- verify the intent against the producer
            # of `ncs`.
            try:
                write_to_csv([self.computer_name,
                              'networks_cards', netcard, adapter_type,
                              description, mac_address, product_name,
                              physical_adapter, speed, IPv4,
                              IPv6, DHCP_server, DNS_server,
                              database_path, nbtstat_value], csv_writer)
            except IOError:
                # Best effort: log the failed row and keep writing the rest.
                self.logger.error(traceback.format_exc())
    record_sha256_logs(self.output_dir + self.computer_name + '_networks_cards' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_network_adapters(self, ncs):
    """Write network adapter details to the <computer>_networks_cards JSON file
    and record its SHA-256. Only runs for the 'local' destination. None fields
    are replaced with a single space.

    ncs: iterable of 14-field adapter tuples (see unpacking below).
    """
    self.logger.info('Health : Listing network adapters')
    if self.destination == 'local':
        with open(self.output_dir + '%s_networks_cards' % self.computer_name + self.rand_ext, 'wb') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "NETWORK_CARD", "ADAPTER_TYPE", "DESCRIPTION", "MAC_ADDR",
                         "PRODUCT_NAME", "PHYSICAL_ADAPTER", "SPEED", "IPv4", "IPv6", "DHCP_SERVER", "DNS_SERVER",
                         "DATABASE_PATH", "NBTSTAT_VALUE"]]
            for nc in ncs:
                # Normalise missing WMI values to a single space, replacing the
                # original 13-branch `if x is None` chain (same observable output).
                (netcard, adapter_type, description, mac_address, _dropped_product_name,
                 physical_adapter, product_name, speed, IPv4, IPv6, DHCP_server,
                 DNS_server, database_path, nbtstat_value) = [
                    ' ' if field is None else field for field in nc]
                # NOTE(review): the original unpacked 'product_name' twice, so
                # field 5 of each tuple was silently discarded and field 7 was
                # written as PRODUCT_NAME. Behaviour preserved -- verify against
                # the producer of `ncs`.
                # DEAD-CODE FIX: the original wrapped this append in
                # try/except IOError, but list concatenation cannot raise
                # IOError, so the handler was unreachable and is removed.
                to_write += [[self.computer_name, 'networks_cards', netcard, adapter_type, description,
                              mac_address, product_name, physical_adapter, speed, IPv4, IPv6, DHCP_server,
                              DNS_server, database_path, nbtstat_value]]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_networks_cards' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_arp_table(self, arp):
    """Parse raw ARP output lines into (IP, MAC, status) CSV rows in
    <computer>_arp_table and record the file's SHA-256.

    arp: iterable of raw text lines; only lines with exactly 3 whitespace-
    separated tokens are kept.
    """
    self.logger.info('Health : Listing ARP tables')
    with open(self.output_dir + '%s_arp_table' % self.computer_name + self.rand_ext, 'wb') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "IP", "MAC_ADDR", "STATUS"], csv_writer)
        for entry in arp:
            # BUG FIX: str.replace returns a new string; the original discarded
            # the result, so the 0xff padding bytes were never stripped.
            entry = entry.replace('\xff', '')
            tokens = entry.split()
            entry_to_write = ''
            if len(tokens) == 3:
                entry_to_write = '"' + self.computer_name + '"|"arp_table"|"' + '"|"'.join(tokens) + '"\n'
            # NOTE(review): '\.' is a literal backslash+dot (not a regex) and
            # find() returns -1 when absent, so the first clause is effectively
            # always true; kept as-is pending confirmation of the intended filter.
            if entry_to_write.find('\.') != 1 and len(entry_to_write) > 0:
                arr_to_write = [self.computer_name, 'arp_table'] + tokens
                write_to_csv(arr_to_write, csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_arp_table' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_arp_table(self, arp):
    """Parse raw ARP output lines into (IP, MAC, status) JSON rows in
    <computer>_arp_table and record the file's SHA-256. Only runs for the
    'local' destination.

    arp: iterable of raw text lines; only lines with exactly 3 whitespace-
    separated tokens are kept.
    """
    self.logger.info('Health : Listing ARP tables')
    if self.destination == 'local':
        with open(self.output_dir + '%s_arp_table' % self.computer_name + self.rand_ext, 'wb') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "IP", "MAC_ADDR", "STATUS"]]
            for entry in arp:
                # BUG FIX: str.replace returns a new string; the original
                # discarded the result, so the 0xff padding bytes were never
                # stripped.
                entry = entry.replace('\xff', '')
                tokens = entry.split()
                entry_to_write = ''
                if len(tokens) == 3:
                    entry_to_write = '"' + self.computer_name + '"|"arp_table"|"' + '"|"'.join(tokens) + '"\n'
                # NOTE(review): '\.' is a literal backslash+dot (not a regex) and
                # find() returns -1 when absent, so the first clause is
                # effectively always true; kept pending confirmation.
                if entry_to_write.find('\.') != 1 and len(entry_to_write) > 0:
                    to_write += [[self.computer_name, 'arp_table'] + tokens]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_arp_table' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_route_table(self, routes):
    """Append the routing table to the <computer>_routes_tables CSV and record
    its SHA-256.

    routes: iterable of (ip, mask) pairs.
    """
    self.logger.info('Health : Listing routes tables')
    out_path = self.output_dir + '%s_routes_tables' % self.computer_name + self.rand_ext
    with open(out_path, 'ab') as fw:
        writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "NAME", "MASK"], writer)
        for ip, mask in routes:
            write_to_csv([self.computer_name, 'routes_tables', unicode(ip), unicode(mask)], writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_routes_tables' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_route_table(self, routes):
    """Write the routing table to the <computer>_routes_tables JSON file and
    record its SHA-256. Only runs for the 'local' destination.

    routes: iterable of (ip, mask) pairs.
    """
    self.logger.info('Health : Listing routes tables')
    if self.destination != 'local':
        return
    out_path = self.output_dir + '%s_routes_tables' % self.computer_name + self.rand_ext
    with open(out_path, 'ab') as fw:
        writer = get_json_writer(fw)
        rows = [["COMPUTER_NAME", "TYPE", "NAME", "MASK"]]
        rows += [[self.computer_name, 'routes_tables', unicode(ip), unicode(mask)] for ip, mask in routes]
        write_list_to_json(rows, writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_routes_tables' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_sockets_network(self, connections):
    """Append network socket details to the <computer>_sockets CSV and record
    its SHA-256.

    connections: iterable of (pid, process name, local addr, source port,
    remote addr, remote port, status) tuples.
    """
    self.logger.info('Health : Listing sockets networks')
    with open(self.output_dir + '%s_sockets' % self.computer_name + self.rand_ext, 'ab') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "LOCAL_ADDR", "SOURCE_PORT", "REMOTE_ADDR",
                      "REMOTE_PORT", "STATUS"], csv_writer)
        for pid, name, local_address, source_port, remote_addr, remote_port, status in connections:
            # unicode() coercion: ports/pids are ints, the rest may be bytes.
            write_to_csv([self.computer_name, 'sockets', unicode(pid),
                          unicode(name), unicode(local_address), unicode(source_port),
                          unicode(remote_addr), unicode(remote_port), unicode(status)], csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_sockets' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_sockets_network(self, connections):
    """Write network socket details to the <computer>_sockets JSON file and
    record its SHA-256. Only runs for the 'local' destination.

    connections: iterable of (pid, process name, local addr, source port,
    remote addr, remote port, status) tuples.
    """
    self.logger.info('Health : Listing sockets networks')
    if self.destination == 'local':
        with open(self.output_dir + '%s_sockets' % self.computer_name + self.rand_ext, 'ab') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "LOCAL_ADDR", "SOURCE_PORT", "REMOTE_ADDR",
                         "REMOTE_PORT", "STATUS"]]
            for pid, name, local_address, source_port, remote_addr, remote_port, status in connections:
                to_write += [[self.computer_name, 'sockets', unicode(pid), unicode(name), unicode(local_address),
                              unicode(source_port), unicode(remote_addr), unicode(remote_port), unicode(status)]]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_sockets' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_services(self, services):
    """Append Windows service details to the <computer>_services CSV and record
    its SHA-256.

    services: iterable of (name, caption, pid, path, service type, status,
    state, start mode) tuples; note the `name` field is unpacked but not
    written (only the caption is).
    """
    self.logger.info('Health : Listing services')
    with open(self.output_dir + '%s_services' % self.computer_name + self.rand_ext, 'ab') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "CAPTION", "PID", "SERVICE_TYPE", "PATH_NAME", "STATUS", "STATE",
                      "START_MODE"], csv_writer)
        for name, caption, processId, pathName, serviceType, status, state, startMode in services:
            write_to_csv([self.computer_name, 'services', caption,
                          unicode(processId), serviceType, pathName,
                          unicode(status), state, startMode], csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_services' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_services(self, services):
    """Write Windows service details to the <computer>_services JSON file and
    record its SHA-256. Only runs for the 'local' destination.

    services: iterable of (name, caption, pid, path, service type, status,
    state, start mode) tuples; note the `name` field is unpacked but not
    written (only the caption is).
    """
    self.logger.info('Health : Listing services')
    if self.destination == 'local':
        with open(self.output_dir + '%s_services' % self.computer_name + self.rand_ext, 'ab') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "CAPTION", "PID", "SERVICE_TYPE", "PATH_NAME", "STATUS", "STATE",
                         "START_MODE"]]
            for name, caption, processId, pathName, serviceType, status, state, startMode in services:
                to_write += [[self.computer_name, 'services', caption, unicode(processId), serviceType, pathName,
                              unicode(status), state, startMode]]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_services' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_kb(self, kbs):
    """Append installed Windows hotfixes (KBs) to the <computer>_kb CSV and
    record its SHA-256.

    kbs: iterable of 9-field hotfix tuples matching the header below.
    """
    self.logger.info('Health : Listing KB installed on computer')
    with open(self.output_dir + '%s_kb' % self.computer_name + self.rand_ext, 'ab') as fw:
        csv_writer = get_csv_writer(fw)
        write_to_csv(["COMPUTER_NAME", "TYPE", "CAPTION", "CS_NAME", "FIX_COMMENTS", "HOTFIX_ID", "INSTALL_DATE",
                      "INSTALLED_ON", "NAME", "SERVICE_PACK", "STATUS"], csv_writer)
        for Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name, ServicePackInEffect, Status in kbs:
            write_to_csv(
                [self.computer_name, 'kb', Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name,
                 ServicePackInEffect, Status], csv_writer)
    record_sha256_logs(self.output_dir + self.computer_name + '_kb' + self.rand_ext,
                       self.output_dir + self.computer_name + '_sha256.log')
def _json_list_kb(self, kbs):
    """Write installed Windows hotfixes (KBs) to the <computer>_kb JSON file
    and record its SHA-256. Only runs for the 'local' destination.

    kbs: iterable of 9-field hotfix tuples matching the header below.
    """
    self.logger.info('Health : Listing KB installed on computer')
    if self.destination == 'local':
        with open(self.output_dir + '%s_kb' % self.computer_name + self.rand_ext, 'ab') as fw:
            json_writer = get_json_writer(fw)
            to_write = [["COMPUTER_NAME", "TYPE", "CAPTION", "CS_NAME", "FIX_COMMENTS", "HOTFIX_ID", "INSTALL_DATE",
                         "INSTALLED_ON", "NAME", "SERVICE_PACK", "STATUS"]]
            for Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name, ServicePackInEffect, Status in kbs:
                to_write += [[self.computer_name, 'kb', Caption, CSName, FixComments, HotFixID, InstallDate,
                              InstalledOn, Name, ServicePackInEffect, Status]]
            write_list_to_json(to_write, json_writer)
        record_sha256_logs(self.output_dir + self.computer_name + '_kb' + self.rand_ext,
                           self.output_dir + self.computer_name + '_sha256.log')
| 58.109428
| 128
| 0.576093
| 3,900
| 34,517
| 4.795641
| 0.071795
| 0.087259
| 0.093247
| 0.047265
| 0.826071
| 0.810244
| 0.785168
| 0.773085
| 0.74865
| 0.722184
| 0
| 0.012055
| 0.317496
| 34,517
| 593
| 129
| 58.20742
| 0.781858
| 0.002376
| 0
| 0.541284
| 0
| 0
| 0.117165
| 0.00607
| 0.00367
| 0
| 0
| 0
| 0
| 1
| 0.073395
| false
| 0
| 0.020183
| 0
| 0.095413
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a7bcfa59c6d42b463a917cbf7f1e86343fda7e3b
| 128
|
py
|
Python
|
indiecoin/node/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | 5
|
2017-11-20T08:46:38.000Z
|
2021-12-28T20:49:16.000Z
|
indiecoin/node/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | null | null | null |
indiecoin/node/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | null | null | null |
class DNSSeed(object):
    """Stub for DNS-based peer discovery (not yet implemented)."""

    def __init__(self):
        """@TODO:
            Implement DNS SEED
        """
        pass
| 14.222222
| 34
| 0.445313
| 11
| 128
| 4.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.445313
| 128
| 8
| 35
| 16
| 0.746479
| 0.195313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
a7c2e3664b7122e107a67963570da6f77a7312a1
| 98
|
py
|
Python
|
src/main.py
|
lss8/projeto-ic
|
c4bad14eedf750661ef51c7dee4613c7ea452ffc
|
[
"MIT"
] | null | null | null |
src/main.py
|
lss8/projeto-ic
|
c4bad14eedf750661ef51c7dee4613c7ea452ffc
|
[
"MIT"
] | null | null | null |
src/main.py
|
lss8/projeto-ic
|
c4bad14eedf750661ef51c7dee4613c7ea452ffc
|
[
"MIT"
] | null | null | null |
import os

# Print the classifier endpoint configured for this deployment.
# NOTE(review): raises KeyError when the variable is unset -- presumably
# intentional, so a misconfigured environment fails loudly.
classify_url = os.environ['MACHINE_LEARNING_FOR_KIDS_API_CLASSIFY_URL']
print(classify_url)
print("Hello, world")
| 16.333333
| 63
| 0.806122
| 15
| 98
| 4.866667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 98
| 5
| 64
| 19.6
| 0.802198
| 0
| 0
| 0
| 0
| 0
| 0.55102
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a7ccedb1cd281313de59bd0a2ff3ea5b415854f7
| 19,164
|
py
|
Python
|
tests/app/main/views/test_service_updates.py
|
ArenaNetworks/dto-digitalmarketplace-admin-frontend
|
2731027a1685890c8f2794b3c816f20b2d496b61
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_service_updates.py
|
ArenaNetworks/dto-digitalmarketplace-admin-frontend
|
2731027a1685890c8f2794b3c816f20b2d496b61
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_service_updates.py
|
ArenaNetworks/dto-digitalmarketplace-admin-frontend
|
2731027a1685890c8f2794b3c816f20b2d496b61
|
[
"MIT"
] | 1
|
2021-08-23T06:05:43.000Z
|
2021-08-23T06:05:43.000Z
|
import mock
import pytest
from datetime import datetime
from dmutils.formats import DISPLAY_DATE_FORMAT
from dmutils.forms import FakeCsrf
from dmapiclient.audit import AuditTypes
from ...helpers import LoggedInApplicationTest
class TestServiceUpdates(LoggedInApplicationTest):
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_render_activity_page_with_date(self, data_api_client):
    """The activity page heading shows today's date."""
    # NOTE(review): skipped because the expected heading depends on the
    # timezone offset between utcnow() and the rendered page -- the assertion
    # fails before 11am local time.
    pytest.skip("fails before 11am????")
    today = datetime.utcnow().strftime(DISPLAY_DATE_FORMAT)
    response = self.client.get('/admin/service-updates')
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    date_header = """
        <p class="context">
          Activity for
        </p>
        <h1>
          {}
        </h1>
    """.format(today)
    # Comparison is whitespace-insensitive via _replace_whitespace.
    self.assertIn(
        self._replace_whitespace(date_header),
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_render_correct_form_defaults(self, data_api_client):
    """With no query params, the date box is empty and acknowledged=false is checked."""
    response = self.client.get('/admin/service-updates')
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        self._replace_whitespace(
            '<input name="acknowledged" value="false" id="acknowledged-3" type="radio" aria-controls="" checked>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_not_allow_invalid_dates(self, data_api_client):
    """An invalid audit_date yields 400 with a validation masthead, and the API is not called."""
    response = self.client.get('/admin/service-updates?audit_date=invalid')
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(400, response.status_code)
    self.assertIn(
        "Not a valid date value",
        response.get_data(as_text=True)
    )
    # The rejected value is re-rendered in the input box.
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="invalid">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        '<div class="validation-masthead" aria-labelledby="validation-masthead-heading">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        self._replace_whitespace(
            '<a href="#example-textbox" class="validation-masthead-link"><label for="audit_date">Audit Date</label></a>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_not_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_not_allow_invalid_acknowledges(self, data_api_client):
    """An invalid acknowledged value yields 400 and the API is not called."""
    response = self.client.get(
        '/admin/service-updates?acknowledged=invalid'
    )
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(400, response.status_code)
    self.assertIn(
        self._replace_whitespace(
            '<a href="#example-textbox" class="validation-masthead-link"><label for="acknowledged">acknowledged</label></a>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_not_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_allow_valid_submission_all(self, data_api_client):
    """Valid audit_date plus acknowledged=all renders both filters as selected."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?audit_date=2006-01-01&acknowledged=all')
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="2006-01-01">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        self._replace_whitespace(
            '<inputname="acknowledged"value="all"id="acknowledged-1"type="radio"aria-controls=""checked>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_allow_valid_submission_date_fields(self, data_api_client):
    """A date-only filter defaults acknowledged to 'false' and queries the API with it."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?audit_date=2006-01-01')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="2006-01-01">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        self._replace_whitespace(
            '<inputname="acknowledged"value="false"id="acknowledged-3"type="radio"aria-controls=""checked>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called_with(
        audit_date='2006-01-01',
        audit_type=AuditTypes.update_service,
        acknowledged='false',
        page=1)
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_allow_acknowledged_fields(self, data_api_client):
    """An acknowledged-only filter leaves the date empty (None) in the API call."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?acknowledged=false')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="">',  # noqa
        response.get_data(as_text=True)
    )
    self.assertIn(
        self._replace_whitespace(
            '<inputname="acknowledged"value="false"id="acknowledged-3"type="radio"aria-controls=""checked>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called_with(
        audit_date=None,
        audit_type=AuditTypes.update_service,
        acknowledged='false',
        page=1)
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_call_api_with_correct_params(self, data_api_client):
    """Both filters are forwarded verbatim to find_audit_events."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?audit_date=2006-01-01&acknowledged=all')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    data_api_client.find_audit_events.assert_called_with(
        audit_type=AuditTypes.update_service,
        audit_date='2006-01-01',
        acknowledged='all',
        page=1)
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_call_api_with_none_date(self, data_api_client):
    """With no date filter the API receives audit_date=None."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?acknowledged=all')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    data_api_client.find_audit_events.assert_called_with(
        audit_type=AuditTypes.update_service,
        audit_date=None,
        acknowledged='all',
        page=1)
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_render_activity_page_with_form_date(self, data_api_client):
    """A submitted audit_date is rendered as a long-form heading date."""
    response = self.client.get(
        '/admin/service-updates?audit_date=2010-01-01'
    )
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    date_header = """
        <p class="context">
          Activity for
        </p>
        <h1>
          Friday 1 January 2010
        </h1>
    """
    # Comparison is whitespace-insensitive via _replace_whitespace.
    self.assertIn(
        self._replace_whitespace(date_header),
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_redirect_to_update_page(self, data_api_client):
    """Acknowledging an event redirects back to the list, preserving the filters."""
    response = self.client.post(
        '/admin/service-updates/123/acknowledge',
        data={
            'acknowledged': 'false',
            'audit_date': '2010-01-05',
            'csrf_token': FakeCsrf.valid_token,
        }
    )
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(302, response.status_code)
    self.assertIn(
        'http://localhost/admin/service-updates',
        response.location)
    self.assertIn(
        'acknowledged=false',
        response.location)
    self.assertIn(
        'audit_date=2010-01-05',
        response.location)
    data_api_client.acknowledge_audit_event.assert_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_not_call_api_when_form_errors(self, data_api_client):
    """A bad audit_date in the acknowledge form yields 400 and no acknowledge call."""
    response = self.client.post(
        '/admin/service-updates/123/acknowledge',
        data={
            'acknowledged': 'false',
            'audit_date': 'invalid',
            'csrf_token': FakeCsrf.valid_token,
        }
    )
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(400, response.status_code)
    data_api_client.acknowledge_audit_event.assert_not_called()
    self.assertIn(
        self._replace_whitespace(
            '<inputname="acknowledged"value="false"id="acknowledged-3"type="radio"aria-controls=""checked>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    # The rejected value is re-rendered in the input box.
    self.assertIn(
        '<input class="filter-field-text" id="audit_date" name="audit_date" placeholder="eg, 2015-07-23" type="text" value="invalid">',  # noqa
        response.get_data(as_text=True)
    )
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_show_no_updates_if_none_returned(self, data_api_client):
    """An empty API result renders the 'No audit events found' message."""
    data_api_client.find_audit_events.return_value = {'auditEvents': [], 'links': {}}
    response = self.client.get('/admin/service-updates?audit_date=2006-01-01')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(200, response.status_code)
    self.assertIn(
        self._replace_whitespace('Noauditeventsfound'),
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called_with(
        page=1,
        audit_date='2006-01-01',
        audit_type=AuditTypes.update_service,
        acknowledged='false')
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_show_no_updates_if_invalid_search(self, data_api_client):
    """An invalid search shows the empty-state message without querying the API."""
    response = self.client.get('/admin/service-updates?audit_date=invalid')  # noqa
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(400, response.status_code)
    self.assertIn(
        self._replace_whitespace('Noauditeventsfound'),
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_not_called()
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_show_updates_if_valid_search(self, data_api_client):
    """A valid search renders each audit event row — supplier, timestamp,
    changes link, acknowledge form — and forwards the filters to the API."""
    audit_event = {
        'auditEvents': [
            {
                'links': {
                    'self': 'http://localhost:5000/adit-events'
                },
                'data': {
                    'serviceName': 'new name',
                    'supplierId': 93518,
                    'supplierName': 'Clouded Networks'
                },
                'user': 'joeblogs',
                'type': 'update_service',
                'id': 25,
                'createdAt': '2015-06-17T08:49:22.999Z'
            }
        ],
        'links': {}
    }
    data_api_client.find_audit_events.return_value = audit_event
    response = self.client.get('/admin/service-updates?audit_date=2010-01-01')  # noqa
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(200, response.status_code)
    self.assertIn(
        self._replace_whitespace(
            '<td class="summary-item-field-first"><span>Clouded Networks</span></td>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    # NOTE(review): 18:49 vs the 08:49 UTC createdAt — presumably local-time
    # rendering; confirm against the template's timezone handling.
    self.assertIn(
        self._replace_whitespace(
            '<td class="summary-item-field"><span>18:49:22<br/>17 June</span></td>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    self.assertIn(
        self._replace_whitespace(
            '<td class="summary-item-field-with-action"><span><a href="/admin/services/compare/...">View changes</a></span></td>'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    self.assertIn(
        self._replace_whitespace(
            '<form action="/admin/service-updates/25/acknowledge" method="post">'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    self.assertIn(
        self._replace_whitespace(
            '<input name="audit_date" type="hidden" value="2010-01-01">'),  # noqa
        self._replace_whitespace(response.get_data(as_text=True))
    )
    data_api_client.find_audit_events.assert_called_with(
        page=1,
        audit_type=AuditTypes.update_service,
        acknowledged='false',
        audit_date='2010-01-01')
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_call_api_ack_audit_event(self, data_api_client):
    """Acknowledging passes the event id and the logged-in user's email
    (from LoggedInApplicationTest) to the API, then redirects."""
    response = self.client.post(
        '/admin/service-updates/123/acknowledge?audit_date=2010-01-01&acknowledged=all',
        data={'csrf_token': FakeCsrf.valid_token},
    )
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(302, response.status_code)
    data_api_client.acknowledge_audit_event.assert_called_with(
        '123', 'test@example.com'
    )
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_pass_valid_page_argument_to_api(self, data_api_client):
    """A numeric page query parameter is forwarded to the API as an int."""
    response = self.client.get('/admin/service-updates?page=5')
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(200, response.status_code)
    data_api_client.find_audit_events.assert_called_with(
        page=5,
        audit_type=AuditTypes.update_service,
        acknowledged='false',
        audit_date=None
    )
@mock.patch('app.main.views.service_updates.data_api_client')
def test_should_not_pass_invalid_page_argument_to_api(self, data_api_client):
    """A non-numeric page parameter is rejected with a 400 before the API is hit."""
    response = self.client.get('/admin/service-updates?page=invalid')
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(400, response.status_code)
    data_api_client.find_audit_events.assert_not_called()
@mock.patch('app.main.views.service_updates.data_api_client')
class TestServiceStatusUpdates(LoggedInApplicationTest):
    """Tests for the per-day service status update listing pages.

    Uses assertEqual throughout: assertEquals is a deprecated alias
    (removed in Python 3.12).
    """

    def test_redirects_to_current_day(self, data_api_client):
        """The bare listing URL redirects to today's date-specific page."""
        response = self.client.get(
            '/admin/service-status-updates'
        )
        self.assertEqual(302, response.status_code)
        # Only the century prefix '20' is asserted so this passes on any run date.
        self.assertIn('http://localhost/admin/service-status-updates/20', response.location)

    def test_404s_invalid_date(self, data_api_client):
        """A date segment that isn't YYYY-MM-DD returns 404."""
        response = self.client.get(
            '/admin/service-status-updates/invalid'
        )
        self.assertEqual(404, response.status_code)

    def test_should_show_updates_for_a_day_with_updates(self, data_api_client):
        """A day with events shows the formatted date and the service id."""
        data_api_client.find_audit_events.return_value = {
            'auditEvents': [
                {
                    'data': {
                        'supplierId': 93518,
                        'serviceId': 1234567890,
                        'supplierName': 'Clouded Networks',
                        'new_status': 'enabled'
                    },
                    'user': 'joeblogs',
                    'type': 'update_status',
                    'createdAt': '2016-01-01T08:49:22.999Z'
                }
            ]
        }
        response = self.client.get(
            '/admin/service-status-updates/2016-01-01'
        )
        self.assertEqual(200, response.status_code)
        page_contents = self._replace_whitespace(response.get_data(as_text=True))
        self.assertIn('Friday1January2016', page_contents)
        self.assertIn('1234567890', page_contents)

    def test_should_link_to_previous_and_next_days(self, data_api_client):
        """Day pages link forward and backward one calendar day."""
        data_api_client.find_audit_events.return_value = {
            'auditEvents': []
        }
        response = self.client.get(
            '/admin/service-status-updates/2015-12-23'
        )
        page_contents = self._replace_whitespace(response.get_data(as_text=True))
        self.assertIn('Wednesday23December2015', page_contents)
        self.assertIn('class="next-page"', page_contents)
        self.assertIn('Tuesday22December2015', page_contents)
        self.assertIn('/service-status-updates/2015-12-22', page_contents)
        self.assertIn('class="previous-page"', page_contents)
        self.assertIn('Thursday24December2015', page_contents)
        self.assertIn('/service-status-updates/2015-12-24', page_contents)

    def test_should_link_to_next_page(self, data_api_client):
        """When the API reports a next page, pagination to page 2 is shown."""
        data_api_client.find_audit_events.return_value = {
            'auditEvents': [],
            'links': {
                'next': '/'
            }
        }
        response = self.client.get(
            '/admin/service-status-updates/2015-12-23'
        )
        page_contents = self._replace_whitespace(response.get_data(as_text=True))
        self.assertIn('class="next-page"', page_contents)
        self.assertIn('Page2', page_contents)
        self.assertIn('ofWednesday23December2015', page_contents)
        self.assertIn('/service-status-updates/2015-12-23/page-2', page_contents)
        self.assertIn('Nextday', page_contents)

    def test_should_link_to_previous_page(self, data_api_client):
        """When the API reports a previous page, pagination back to page 1 is shown."""
        data_api_client.find_audit_events.return_value = {
            'auditEvents': [],
            'links': {
                'next': '/',
                'prev': '/'
            }
        }
        response = self.client.get(
            '/admin/service-status-updates/2015-12-23/page-2'
        )
        page_contents = self._replace_whitespace(response.get_data(as_text=True))
        self.assertIn('class="previous-page"', page_contents)
        self.assertIn('Page1', page_contents)
        self.assertIn('ofWednesday23December2015', page_contents)
        self.assertIn('/service-status-updates/2015-12-23/page-1', page_contents)
| 39.925
| 150
| 0.634001
| 2,186
| 19,164
| 5.265325
| 0.096981
| 0.043788
| 0.081321
| 0.041355
| 0.851434
| 0.837619
| 0.836577
| 0.830235
| 0.807993
| 0.765682
| 0
| 0.032144
| 0.246765
| 19,164
| 479
| 151
| 40.008351
| 0.765223
| 0.006731
| 0
| 0.588832
| 0
| 0.043147
| 0.276568
| 0.172506
| 0
| 0
| 0
| 0
| 0.215736
| 1
| 0.060914
| false
| 0.005076
| 0.017767
| 0
| 0.083756
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac1cb218a187c4cf06718102d9de795169ba9280
| 72
|
py
|
Python
|
module3.py
|
piotrbla/pyExamples
|
d949784e614da53afc05a1245c824d0b853d8234
|
[
"MIT"
] | null | null | null |
module3.py
|
piotrbla/pyExamples
|
d949784e614da53afc05a1245c824d0b853d8234
|
[
"MIT"
] | null | null | null |
module3.py
|
piotrbla/pyExamples
|
d949784e614da53afc05a1245c824d0b853d8234
|
[
"MIT"
] | null | null | null |
""" This is module3 """


def weirdfun():
    """Placeholder that deliberately does nothing and returns None."""
    return None


# Announce the module on import.
print("module3")
| 18
| 23
| 0.638889
| 9
| 72
| 5.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.180556
| 72
| 4
| 24
| 18
| 0.745763
| 0.208333
| 0
| 0
| 0
| 0
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ac1e6a224bb08ce9626c6b4f0d468511f13b9405
| 47
|
py
|
Python
|
app/geoserver/tests/test_wire_api.py
|
egormm/geo-optic-net-monitoring
|
9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985
|
[
"MIT"
] | null | null | null |
app/geoserver/tests/test_wire_api.py
|
egormm/geo-optic-net-monitoring
|
9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985
|
[
"MIT"
] | null | null | null |
app/geoserver/tests/test_wire_api.py
|
egormm/geo-optic-net-monitoring
|
9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985
|
[
"MIT"
] | null | null | null |
# TODO: create tests for wire list and details
| 23.5
| 46
| 0.765957
| 8
| 47
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 47
| 1
| 47
| 47
| 0.947368
| 0.93617
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac40ae8908e7f9130a1ca9c0fc48c42dba40cf82
| 98
|
py
|
Python
|
rmn/models/segmentation/__init__.py
|
TomKingsfordUoA/ResidualMaskingNetwork
|
6ce5ddf70f8ac8f1e6da2746b0bbeb9e457ceb7d
|
[
"MIT"
] | 242
|
2020-01-09T11:06:21.000Z
|
2022-03-26T14:51:48.000Z
|
rmn/models/segmentation/__init__.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 33
|
2020-01-09T08:42:10.000Z
|
2022-03-23T07:52:56.000Z
|
rmn/models/segmentation/__init__.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 61
|
2020-01-19T02:20:37.000Z
|
2022-03-25T13:08:48.000Z
|
from .segmentation import *
from .fcn import *
from .deeplabv3 import *
from .unet_basic import *
| 19.6
| 27
| 0.755102
| 13
| 98
| 5.615385
| 0.538462
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.163265
| 98
| 4
| 28
| 24.5
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac4c733f8bcb3452a8f37b3e645a3b412d07ff02
| 12,239
|
py
|
Python
|
tests/01-numpy_arrays/010-int32_find_max/test_int32_find_max.py
|
nandub/nim-pymod
|
3e4c49afdfdab2c3325588b6b823c102f22fc588
|
[
"MIT"
] | 256
|
2015-11-12T09:25:21.000Z
|
2022-02-11T01:59:34.000Z
|
tests/01-numpy_arrays/010-int32_find_max/test_int32_find_max.py
|
nandub/nim-pymod
|
3e4c49afdfdab2c3325588b6b823c102f22fc588
|
[
"MIT"
] | 11
|
2015-11-12T22:48:14.000Z
|
2019-03-30T07:44:32.000Z
|
tests/01-numpy_arrays/010-int32_find_max/test_int32_find_max.py
|
nandub/nim-pymod
|
3e4c49afdfdab2c3325588b6b823c102f22fc588
|
[
"MIT"
] | 12
|
2015-11-12T22:28:24.000Z
|
2019-01-08T02:15:26.000Z
|
import array_utils
import numpy
import pytest
# Compile the Nim pymod test module once before the data-driven tests below.
# (The leading "0" in the name makes pytest collect and run it first.)
def test_0_compile_pymod_test_mod(pmgen_py_compile):
    pmgen_py_compile(__name__)

# Array dimensionalities exercised by every parametrized test in this file.
ndims_to_test = [1, 2, 3, 4]
# for loop, values
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopValues",
    "int32FindMaxForLoopValues_m",
])
def test_int32FindMaxForLoopValues(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name):
    """The Nim for-loop-over-values variants find the max of an int32 array."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nrandom number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values)
    print("res = %s" % str(actual))
    assert actual == expected
# while loop, Forward Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
def test_int32FindMaxWhileLoopForwardIter(pymod_test_mod, seeded_random_number_generator, ndim):
    """The Nim while-loop forward-iterator proc finds the max of an int32 array."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nrandom number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = pymod_test_mod.int32FindMaxWhileLoopForwardIter(values)
    print("res = %s" % str(actual))
    assert actual == expected
# for loop, Forward Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopForwardIter",
    "int32FindMaxForLoopForwardIter_m",
    "int32FindMaxForLoopForwardIter_i",
])
def test_int32FindMaxForLoopForwardIter(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name):
    """The Nim for-loop forward-iterator variants find the max of an int32 array."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s" % nim_test_proc_name)
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values)
    print("res = %s" % str(actual))
    assert actual == expected
# while loop, Rand Acc Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxWhileLoopRandaccIterDeref",
    "int32FindMaxWhileLoopRandaccIterIndex0",
    "int32FindMaxWhileLoopRandaccIterDerefPlusZeroOffset",
    "int32FindMaxWhileLoopRandaccIterDerefMinusZeroOffset",
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_1",
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_2",
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_3",
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_4",
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_5",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_1",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_2",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_3",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_4",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_5",
])
def test_int32FindMaxWhileLoopRandaccIterDerefAlternatives(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name):
    """All random-access-iterator dereference/index variants agree with NumPy's max."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s" % nim_test_proc_name)
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxWhileLoopRandaccIterIndexVsPlusOffsetK",
    "int32FindMaxWhileLoopRandaccIterIndexVsMinusOffsetK",
])
@pytest.mark.parametrize("k", [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
def test_int32FindMaxWhileLoopRandaccIterDerefKParamAlternatives(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, k):
    """Offset-by-k iterator variants still find the array max for any small k."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s, k = %d" % (nim_test_proc_name, k))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, k)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxWhileLoopRandaccIterDeltaN_1",
    "int32FindMaxWhileLoopRandaccIterDeltaN_2",
])
@pytest.mark.parametrize("n", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterDeltaN_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, n):
    """Stride-n iteration matches NumPy's max over every n-th flat element."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s, n = %d" % (nim_test_proc_name, n))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    strided = values.flat[::n]
    print("arg.flat[::n] =\n%s" % strided)
    expected = strided.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, n)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxWhileLoopRandaccIterExcludeFirstM_1",
    "int32FindMaxWhileLoopRandaccIterExcludeFirstM_2",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterExcludeFirstM_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, m):
    """Skipping the first m elements matches NumPy; an empty remainder yields int32.min."""
    elem_type = numpy.int32
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, elem_type)
    print("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    tail = values.flat[m:]
    print("arg.flat[m:] =\n%s" % tail)
    if tail.size > 0:
        expected = tail.max()
        print("expectedRes = %s" % str(expected))
    else:
        # Nim side returns the int32 minimum for an empty range.
        expected = numpy.iinfo(elem_type).min
        print("expectedRes = %s (int32.min)" % str(expected))
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, m)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxWhileLoopRandaccIterExcludeLastM_1",
    "int32FindMaxWhileLoopRandaccIterExcludeLastM_2",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterExcludeLastM_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, m):
    """Dropping the last m elements matches NumPy; an empty remainder yields int32.min."""
    elem_type = numpy.int32
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, elem_type)
    print("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    head = values.flat[:-m]
    print("arg.flat[:-m] =\n%s" % head)
    if head.size > 0:
        expected = head.max()
        print("expectedRes = %s" % str(expected))
    else:
        # Nim side returns the int32 minimum for an empty range.
        expected = numpy.iinfo(elem_type).min
        print("expectedRes = %s (int32.min)" % str(expected))
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, m)
    print("res = %s" % str(actual))
    assert actual == expected
# for loop, Rand Acc Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopRandaccIterDeref",
    "int32FindMaxForLoopRandaccIterDeref_m",
    "int32FindMaxForLoopRandaccIterDeref_i",
    "int32FindMaxForLoopRandaccIterIndex0_i",
])
def test_int32FindMaxForLoopRandaccIterDerefAlternatives(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name):
    """For-loop random-access-iterator variants agree with NumPy's max."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s" % nim_test_proc_name)
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    expected = values.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopRandaccIterDeltaN",
    "int32FindMaxForLoopRandaccIterDeltaN_m",
    "int32FindMaxForLoopRandaccIterDeltaN_i",
])
@pytest.mark.parametrize("n", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterDeltaN_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, n):
    """For-loop stride-n iteration matches NumPy's max over every n-th flat element."""
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
    print("\nnim_test_proc_name = %s, n = %d" % (nim_test_proc_name, n))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    strided = values.flat[::n]
    print("arg.flat[::n] =\n%s" % strided)
    expected = strided.max()
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, n)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopRandaccIterExcludeFirstM",
    "int32FindMaxForLoopRandaccIterExcludeFirstM_m",
    "int32FindMaxForLoopRandaccIterExcludeFirstM_i",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterExcludeFirstM_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, m):
    """For-loop skip-first-m variants match NumPy; empty remainder yields int32.min."""
    elem_type = numpy.int32
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, elem_type)
    print("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    tail = values.flat[m:]
    print("arg.flat[m:] =\n%s" % tail)
    if tail.size > 0:
        expected = tail.max()
        print("expectedRes = %s" % str(expected))
    else:
        # Nim side returns the int32 minimum for an empty range.
        expected = numpy.iinfo(elem_type).min
        print("expectedRes = %s (int32.min)" % str(expected))
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, m)
    print("res = %s" % str(actual))
    assert actual == expected
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
    "int32FindMaxForLoopRandaccIterExcludeLastM_i",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterExcludeLastM_1(pymod_test_mod, seeded_random_number_generator,
        ndim, nim_test_proc_name, m):
    """For-loop drop-last-m variant matches NumPy; empty remainder yields int32.min."""
    elem_type = numpy.int32
    values = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, elem_type)
    print("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
    print("random number seed = %d\nndim = %d, shape = %s\narg =\n%s"
          % (seeded_random_number_generator, ndim, values.shape, values))
    head = values.flat[:-m]
    print("arg.flat[:-m] =\n%s" % head)
    if head.size > 0:
        expected = head.max()
        print("expectedRes = %s" % str(expected))
    else:
        # Nim side returns the int32 minimum for an empty range.
        expected = numpy.iinfo(elem_type).min
        print("expectedRes = %s (int32.min)" % str(expected))
    actual = getattr(pymod_test_mod, nim_test_proc_name)(values, m)
    print("res = %s" % str(actual))
    assert actual == expected
| 44.02518
| 112
| 0.704796
| 1,497
| 12,239
| 5.45491
| 0.066132
| 0.051923
| 0.077884
| 0.078986
| 0.713568
| 0.710752
| 0.710752
| 0.710752
| 0.705486
| 0.705119
| 0
| 0.02582
| 0.170929
| 12,239
| 277
| 113
| 44.184116
| 0.778949
| 0.009314
| 0
| 0.743697
| 0
| 0.05042
| 0.270424
| 0.135501
| 0
| 0
| 0
| 0
| 0.05042
| 1
| 0.054622
| false
| 0
| 0.012605
| 0
| 0.067227
| 0.201681
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3bac1f717667829e4ab5792ba981a52ef39a2102
| 167,647
|
py
|
Python
|
evaluation_acm_ccr_2019/algorithm_heatmap_plots.py
|
RobinMnk/evaluation-acm-ccr-2019
|
c60ebf1c8b3a3f762ff50101e9c3d10f7cb05e8c
|
[
"MIT"
] | null | null | null |
evaluation_acm_ccr_2019/algorithm_heatmap_plots.py
|
RobinMnk/evaluation-acm-ccr-2019
|
c60ebf1c8b3a3f762ff50101e9c3d10f7cb05e8c
|
[
"MIT"
] | null | null | null |
evaluation_acm_ccr_2019/algorithm_heatmap_plots.py
|
RobinMnk/evaluation-acm-ccr-2019
|
c60ebf1c8b3a3f762ff50101e9c3d10f7cb05e8c
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2016-2018 Matthias Rost, Elias Doehne, Alexander Elvers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""This is the evaluation and plotting module.
This module handles all plotting related evaluation.
"""
import itertools
import os
import sys
from collections import namedtuple
from itertools import combinations, product
from time import gmtime, strftime
import copy
try:
import cPickle as pickle
except ImportError:
import pickle
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.patheffects as PathEffects
import matplotlib.patches as mpatches
from matplotlib import gridspec
import yaml
from matplotlib import font_manager
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from alib import solutions, util
from vnep_approx import vine, treewidth_model
from evaluation_acm_ccr_2019 import plot_data
REQUIRED_FOR_PICKLE = solutions  # this prevents pycharm from removing this import, which is required for unpickling solutions
# Output directory for generated plots; expected to be set by the caller before plotting.
OUTPUT_PATH = None
# Default (width, height) in inches for generated figures.
FIGSIZE = (5,3.5)
logger = util.get_logger(__name__, make_file=False, propagate=True)
class HeatmapPlotType(object):
    """Enumeration of the result-data kinds a heatmap specification consumes."""
    ViNE = 0  # a plot only for OfflineViNEResult data
    RandRoundSepLPDynVMP = 1  # a plot only for RandRoundSepLPOptDynVMPCollectionResult data
    SeparationLP = 2  # a plot only for SeparationLPSolution data
    ComparisonVineRandRound = 3
    LatencyStudy = 4
    ComparisonLatencyBaseline = 5
    # All valid plot-type values (must mirror the constants above).
    VALUE_RANGE = [0, 1, 2, 3, 4, 5]
"""
Collection of heatmap plot specifications. Each specification corresponds to a specific plot and describes all essential
information:
- name: the title of the plot
- filename: prefix of the files to be generated
- plot_type: A HeatmapPlotType describing which data is required as input.
- vmin and vmax: minimum and maximum value for the heatmap
- cmap: the colormap that is to be used for the heatmap
- lookup_function: which of the values shall be plotted. the input is a tuple consisting of a baseline and a randomized rounding
solution. The function must return a numeric value or NaN
- metric filter: after having applied the lookup_function (returning a numeric value or NaN) the metric_filter is
applied (if given) and values not matching this function are discarded.
- rounding_function: the function that is applied for displaying the mean values in the heatmap plots
- colorbar_ticks: the tick values (numeric) for the heatmap plot
"""
def get_list_of_vine_settings():
    """Return the ViNE settings combinations considered by the evaluation.

    Objectives that include scenario costs and the splittable edge embedding
    model are excluded.
    """
    excluded_objectives = (
        vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS,
        vine.ViNELPObjective.ViNE_COSTS_INCL_SCENARIO_COSTS,
    )
    settings_list = []
    for edge_model, objective, rounding in itertools.product(
            vine.ViNEEdgeEmbeddingModel,
            vine.ViNELPObjective,
            vine.ViNERoundingProcedure):
        if objective in excluded_objectives:
            continue
        if edge_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE:
            continue
        settings_list.append(vine.ViNESettingsFactory.get_vine_settings(
            edge_embedding_model=edge_model,
            lp_objective=objective,
            rounding_procedure=rounding,
        ))
    return settings_list
def get_list_of_rr_settings():
    """Return the (LP recomputation mode, rounding order) pairs used for
    randomized rounding, excluding single-separation recomputation."""
    return [
        sub_param
        for sub_param in itertools.product(
            treewidth_model.LPRecomputationMode,
            treewidth_model.RoundingOrder,
        )
        if sub_param[0] != treewidth_model.LPRecomputationMode.RECOMPUTATION_WITH_SINGLE_SEPARATION
    ]
def get_alg_variant_string(plot_type, algorithm_sub_parameter):
    """Return a short identifier string for an algorithm variant.

    For ViNE plots the identifier encodes edge-embedding model, objective and
    rounding procedure; for DynVMP plots it encodes the LP recomputation mode
    and the rounding order. Raises ValueError for unknown enum values or an
    unexpected plot type.
    """
    if plot_type == HeatmapPlotType.ViNE:
        vine.ViNESettingsFactory.check_vine_settings(algorithm_sub_parameter)
        is_splittable = algorithm_sub_parameter.edge_embedding_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE
        is_load_balanced_objective = (
            algorithm_sub_parameter.lp_objective in
            [vine.ViNELPObjective.ViNE_LB_DEF, vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS]
        )
        # NOTE(review): ViNE_LB_INCL_SCENARIO_COSTS appears here as well,
        # where get_specific_vine_name uses ViNE_COSTS_INCL_SCENARIO_COSTS —
        # possibly a copy-paste slip; confirm intended objective grouping.
        is_cost_objective = (
            algorithm_sub_parameter.lp_objective in
            [vine.ViNELPObjective.ViNE_COSTS_DEF, vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS]
        )
        is_random_rounding_procedure = algorithm_sub_parameter.rounding_procedure == vine.ViNERoundingProcedure.RANDOMIZED
        return "vine_{}{}{}{}".format(
            "mcf" if is_splittable else "sp",
            "_lb" if is_load_balanced_objective else "",
            "_cost" if is_cost_objective else "",
            "_rand" if is_random_rounding_procedure else "_det",
        )
    elif plot_type == HeatmapPlotType.RandRoundSepLPDynVMP:
        # algorithm_sub_parameter is a (recomputation mode, rounding order) pair.
        lp_mode, rounding_mode = algorithm_sub_parameter
        if lp_mode == treewidth_model.LPRecomputationMode.NONE:
            lp_str = "recomp_none"
        elif lp_mode == treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION:
            lp_str = "recomp_no_sep"
        elif lp_mode == treewidth_model.LPRecomputationMode.RECOMPUTATION_WITH_SINGLE_SEPARATION:
            lp_str = "recomp_single_sep"
        else:
            raise ValueError()
        if rounding_mode == treewidth_model.RoundingOrder.RANDOM:
            rounding_str = "round_rand"
        elif rounding_mode == treewidth_model.RoundingOrder.STATIC_REQ_PROFIT:
            rounding_str = "round_static_profit"
        elif rounding_mode == treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT:
            rounding_str = "round_achieved_profit"
        else:
            raise ValueError()
        return "dynvmp__{}__{}".format(
            lp_str,
            rounding_str,
        )
    else:
        raise ValueError("Unexpected HeatmapPlotType {}".format(plot_type))
class AbstractHeatmapSpecificationVineFactory(object):
prototype = dict()
@classmethod
def get_hs(cls, vine_settings_list, name):
result = copy.deepcopy(cls.prototype)
result['lookup_function'] = lambda x: cls.prototype['lookup_function'](x, vine_settings_list)
result['alg_variant'] = name
return result
@classmethod
def get_specific_vine_name(cls, vine_settings):
vine.ViNESettingsFactory.check_vine_settings(vine_settings)
is_splittable = vine_settings.edge_embedding_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE
is_load_balanced_objective = (
vine_settings.lp_objective in
[vine.ViNELPObjective.ViNE_LB_DEF, vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS]
)
is_scenario_cost_objective = (
vine_settings.lp_objective in
[vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS, vine.ViNELPObjective.ViNE_COSTS_INCL_SCENARIO_COSTS]
)
is_random_rounding_procedure = vine_settings.rounding_procedure == vine.ViNERoundingProcedure.RANDOMIZED
return "vine_{}_{}_{}_{}".format(
"mcf" if is_splittable else "sp",
"lb" if is_load_balanced_objective else "cost",
"scenario" if is_scenario_cost_objective else "def",
"rand" if is_random_rounding_procedure else "det",
)
@classmethod
def get_all_vine_settings_list_with_names(cls):
result = []
vine_settings_list = get_list_of_vine_settings()
result.append((vine_settings_list, "vine_ALL")) #first off: every vine combination
# second: each specific one
for vine_settings in vine_settings_list:
result.append(([vine_settings], cls.get_specific_vine_name(vine_settings)))
#third: each aggregation level, when applicable, i.e. there is more than one setting for that
for edge_embedding_model in vine.ViNEEdgeEmbeddingModel:
matching_settings = []
for vine_settings in vine_settings_list:
if vine_settings.edge_embedding_model == edge_embedding_model:
matching_settings.append(vine_settings)
if len(matching_settings) > 0 and len(matching_settings) != len(vine_settings_list):
result.append((matching_settings, "vine_{}".format(
"MCF" if edge_embedding_model is vine.ViNEEdgeEmbeddingModel.SPLITTABLE else "SP")))
for lp_objective in vine.ViNELPObjective:
matching_settings = []
for vine_settings in vine_settings_list:
if vine_settings.lp_objective == lp_objective:
matching_settings.append(vine_settings)
if len(matching_settings) > 0 and len(matching_settings) != len(vine_settings_list):
is_load_balanced_objective = (
vine_settings.lp_objective in
[vine.ViNELPObjective.ViNE_LB_DEF, vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS]
)
is_scenario_cost_objective = (
vine_settings.lp_objective in
[vine.ViNELPObjective.ViNE_LB_INCL_SCENARIO_COSTS,
vine.ViNELPObjective.ViNE_COSTS_INCL_SCENARIO_COSTS]
)
result.append((matching_settings, "vine_{}_{}".format(
"LB" if is_load_balanced_objective else "COST",
"SCENARIO" if is_scenario_cost_objective else "DEF"
)))
for rounding_proc in vine.ViNERoundingProcedure:
matching_settings = []
for vine_settings in vine_settings_list:
if vine_settings.rounding_procedure == rounding_proc:
matching_settings.append(vine_settings)
if len(matching_settings) > 0 and len(matching_settings) != len(vine_settings_list):
result.append((matching_settings, "vine_{}".format(
"RAND" if rounding_proc is vine.ViNERoundingProcedure.RANDOMIZED else "DET")))
return result
@classmethod
def get_all_hs(cls):
    """Instantiate one heatmap specification per derived (vine settings list, name) pair."""
    specs = []
    for settings_list, variant_name in cls.get_all_vine_settings_list_with_names():
        specs.append(cls.get_hs(settings_list, variant_name))
    return specs
def compute_aggregated_mean(list_of_aggregated_data, debug=False):
    """Return the weighted mean of several aggregated-data objects.

    Each element must expose ``mean`` and ``value_count``; the result equals the
    mean over all underlying values, i.e. sum(mean_i * count_i) / sum(count_i).

    Raises:
        ValueError: if the total value count is zero (e.g. empty input), instead
            of failing with an uninformative ZeroDivisionError.
    """
    weighted_sum = 0.0
    value_count = 0
    for agg in list_of_aggregated_data:
        weighted_sum += agg.mean * agg.value_count
        value_count += agg.value_count
    if value_count == 0:
        raise ValueError("cannot compute aggregated mean: total value count is zero")
    if debug:
        # parenthesized single-string print works under both Python 2 and 3
        print("{} {} {}".format(len(list_of_aggregated_data), value_count,
                                weighted_sum / value_count))
    return weighted_sum / value_count
class HSF_Vine_Runtime(AbstractHeatmapSpecificationVineFactory):
    """Heatmap specification: mean total runtime (seconds) over all selected ViNE runs."""

    prototype = {
        "name": "ViNE: Mean Runtime [s]",
        "filename": "vine_mean_runtime",
        "vmin": 0,
        "vmax": 20,
        "alg_variant": None,
        "colorbar_ticks": list(range(0, 21, 4)),
        "cmap": "Greys",
        "plot_type": HeatmapPlotType.ViNE,
        "lookup_function": lambda vine_result_dict, vine_settings_list: compute_aggregated_mean(
            [single_result.total_runtime
             for settings in vine_settings_list
             for single_result in vine_result_dict[settings]]
        ),
        "rounding_function": lambda value: int(round(value)),
    }
# class HSF_Vine_MaxNodeLoad(AbstractHeatmapSpecificationVineFactory):
# prototype = dict(
# name="ViNE: Max. Node Load [%]",
# filename="max_node_load",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Oranges",
# plot_type=HeatmapPlotType.ViNE,
# lookup_function=lambda vine_result_dict, vine_settings_list: max(
# vine_result.max_node_load.max
# for vine_settings in vine_settings_list
# for vine_result in vine_result_dict[vine_settings]
# )
# )
#
# class HSF_Vine_MaxEdgeLoad(AbstractHeatmapSpecificationVineFactory):
#
# prototype = dict(
# name="ViNE: Max. Edge Load [%]",
# filename="max_edge_load",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Purples",
# plot_type=HeatmapPlotType.ViNE,
# lookup_function=lambda vine_result_dict, vine_settings_list: max(
# vine_result.max_edge_load.max
# for vine_settings in vine_settings_list
# for vine_result in vine_result_dict[vine_settings]
# )
# )
#
# class HSF_Vine_MaxLoad(AbstractHeatmapSpecificationVineFactory):
#
# prototype = dict(
# name="ViNE: MaxLoad (Edge and Node)",
# filename="max_load",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Reds",
# plot_type=HeatmapPlotType.ViNE,
# lookup_function=lambda vine_result_dict, vine_settings_list: max(
# max(vine_result.max_node_load.max, vine_result.max_edge_load.max)
# for vine_settings in vine_settings_list
# for vine_result in vine_result_dict[vine_settings]
# )
# )
class AbstractHeatmapSpecificationSepLPRRFactory(object):
    """Base factory for heatmap specifications of the separation-LP randomized
    rounding (RR) algorithm.

    Subclasses override ``prototype`` with the concrete heatmap description.
    RR settings are tuples of (LP recomputation mode, rounding order); this base
    class derives named specification variants for aggregations of them.
    """

    prototype = dict()

    @classmethod
    def get_hs(cls, rr_settings, name):
        """Return a deep copy of the prototype bound to the given RR settings and name."""
        result = copy.deepcopy(cls.prototype)
        # bind the settings into the lookup so the plotting code only passes the results
        result['lookup_function'] = lambda x: cls.prototype['lookup_function'](x, rr_settings)
        result['alg_variant'] = name
        return result

    @classmethod
    def _get_lp_str(cls, lp_mode):
        """Map an LPRecomputationMode to the short identifier used in names/filenames."""
        if lp_mode == treewidth_model.LPRecomputationMode.NONE:
            return "no_recomp"
        elif lp_mode == treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION:
            return "recomp_no_sep"
        elif lp_mode == treewidth_model.LPRecomputationMode.RECOMPUTATION_WITH_SINGLE_SEPARATION:
            return "recomp_single_sep"
        else:
            # previously raised ValueError() without any message
            raise ValueError("unknown LP recomputation mode: {}".format(lp_mode))

    @classmethod
    def _get_rounding_str(cls, rounding_mode):
        """Map a RoundingOrder to the short identifier used in names/filenames."""
        if rounding_mode == treewidth_model.RoundingOrder.RANDOM:
            return "round_rand"
        elif rounding_mode == treewidth_model.RoundingOrder.STATIC_REQ_PROFIT:
            return "round_static_profit"
        elif rounding_mode == treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT:
            return "round_achieved_profit"
        else:
            # previously raised ValueError() without any message
            raise ValueError("unknown rounding order: {}".format(rounding_mode))

    @classmethod
    def get_specific_rr_name(cls, rr_settings):
        """Return the unique name of a single (lp_mode, rounding_mode) combination."""
        return "rr_seplp_{}__{}".format(
            cls._get_lp_str(rr_settings[0]),
            cls._get_rounding_str(rr_settings[1]),
        )

    @classmethod
    def get_all_rr_settings_list_with_names(cls):
        """Return (settings_list, name) pairs: all settings combined, each single
        setting, and each single-parameter aggregation when non-trivial."""
        result = []
        rr_settings_list = get_list_of_rr_settings()
        result.append((rr_settings_list, "rr_seplp_ALL"))  # first off: every rr-settings combination
        # second: each specific one
        for rr_settings in rr_settings_list:
            result.append(([rr_settings], cls.get_specific_rr_name(rr_settings)))
        # third: each aggregation level, when applicable, i.e. there is more than one setting for that
        for lp_mode in treewidth_model.LPRecomputationMode:
            matching_settings = [rr_settings for rr_settings in rr_settings_list
                                 if rr_settings[0] == lp_mode]
            if 0 < len(matching_settings) < len(rr_settings_list):
                result.append((matching_settings,
                               "rr_seplp_{}".format(cls._get_lp_str(lp_mode).upper())))
        for rounding_mode in treewidth_model.RoundingOrder:
            matching_settings = [rr_settings for rr_settings in rr_settings_list
                                 if rr_settings[1] == rounding_mode]
            if 0 < len(matching_settings) < len(rr_settings_list):
                result.append((matching_settings,
                               "rr_seplp_{}".format(cls._get_rounding_str(rounding_mode).upper())))
        return result

    @classmethod
    def get_all_hs(cls):
        """Instantiate one heatmap specification per derived settings variant."""
        return [cls.get_hs(rr_settings, name)
                for rr_settings, name in cls.get_all_rr_settings_list_with_names()]
# class HSF_RR_MaxNodeLoad(AbstractHeatmapSpecificationSepLPRRFactory):
# prototype = dict(
# name="RR: Max node load",
# filename="randround_max_node_load",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Reds",
# plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
# lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: 100.0 * np.mean([value for rr_seplp_settings in rr_seplp_settings_list for value in rr_seplp_result.max_node_loads[rr_seplp_settings]])
# )
#
# class HSF_RR_MaxEdgeLoad(AbstractHeatmapSpecificationSepLPRRFactory):
# prototype = dict(
# name="RR: Max edge load",
# filename="randround_max_edge_load",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Reds",
# plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
# lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: 100.0 * np.mean([value for rr_seplp_settings in rr_seplp_settings_list for value in rr_seplp_result.max_edge_loads[rr_seplp_settings]])
# )
#
# class HSF_RR_MeanProfit(AbstractHeatmapSpecificationSepLPRRFactory):
# prototype = dict(
# name="RR: Mean Profit",
# filename="randround_mean_profit",
# vmin=0.0,
# vmax=100,
# colorbar_ticks=[x for x in range(0, 101, 20)],
# cmap="Reds",
# plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
# lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: np.mean([value for rr_seplp_settings in rr_seplp_settings_list for value in rr_seplp_result.profits[rr_seplp_settings]])
# )
class HSF_RR_MeanRoundingRuntime(AbstractHeatmapSpecificationSepLPRRFactory):
    """Heatmap specification: mean rounding runtime of the RR algorithm."""

    prototype = dict(
        name="RR: Mean Rounding Runtime",
        # bug fix: the filename was "randround_mean_profit" (copied from the
        # profit heatmap), so runtime plots were written under the profit name
        filename="randround_mean_rounding_runtime",
        vmin=0.0,
        vmax=200,
        colorbar_ticks=[x for x in range(0, 201, 40)],
        cmap="Reds",
        plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
        lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: np.mean(
            [rr_seplp_result.rounding_runtimes[rr_seplp_settings].mean
             for rr_seplp_settings in rr_seplp_settings_list])
    )
class HSF_RR_MeanDynVMPInitTimes(AbstractHeatmapSpecificationSepLPRRFactory):
    """Heatmap specification: total DynVMP initialization runtime of the RR algorithm.

    The lookup function ignores the concrete RR settings, hence only a single
    (arbitrary) settings entry is used to derive the plot.
    """

    prototype = dict(
        name="RR: Mean DynVMP Initialization Runtimes",
        filename="randround_mean_dynvmp_initialization",
        vmin=0.0,
        vmax=50,
        colorbar_ticks=[x for x in range(0, 51, 10)],
        cmap="Reds",
        plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
        lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: (
            rr_seplp_result.lp_time_dynvmp_initialization.mean *
            rr_seplp_result.lp_time_dynvmp_initialization.value_count)
    )

    @classmethod
    def get_all_rr_settings_list_with_names(cls):
        # bug fix: use the RR settings list here; this previously called
        # get_list_of_vine_settings(), which only worked because the lookup
        # function above ignores the settings entirely
        rr_settings_list = get_list_of_rr_settings()
        # select arbitrary rr_settings to derive plots from
        return [([rr_settings_list[0]], "rr_seplp_ALL")]
class HSF_RR_LP_Runtime(AbstractHeatmapSpecificationSepLPRRFactory):
    """Heatmap specification: LP runtime (preprocessing + optimization) of the RR algorithm.

    The lookup function ignores the concrete RR settings, hence only a single
    (arbitrary) settings entry is used to derive the plot.
    """

    prototype = dict(
        name="RR: LP runtime",
        filename="randround_lp_runtime",
        vmin=0.0,
        vmax=100,
        colorbar_ticks=[x for x in range(0, 101, 20)],
        cmap="Blues",
        plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
        lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: (
            rr_seplp_result.lp_time_optimization + rr_seplp_result.lp_time_preprocess)
    )

    @classmethod
    def get_all_rr_settings_list_with_names(cls):
        # bug fix: previously called get_list_of_vine_settings(); harmless only
        # because the lookup function above ignores the settings entirely
        rr_settings_list = get_list_of_rr_settings()
        # select arbitrary rr_settings to derive plots from
        return [([rr_settings_list[0]], "rr_seplp_ALL")]
class HSF_RR_Runtime(AbstractHeatmapSpecificationSepLPRRFactory):
    """Heatmap specification: mean rounding runtime over the selected RR settings."""

    prototype = {
        "name": "RR: Rounding Runtime",
        "filename": "randround_rounding_runtime",
        "vmin": 0.0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Blues",
        "plot_type": HeatmapPlotType.RandRoundSepLPDynVMP,
        "lookup_function": lambda rr_seplp_result, rr_seplp_settings_list: np.mean(
            [rr_seplp_result.rounding_runtimes[rr_settings].mean
             for rr_settings in rr_seplp_settings_list]),
    }
class HSF_RR_GeneratedMappings(AbstractHeatmapSpecificationSepLPRRFactory):
    """Heatmap specification: number of LP-generated mappings (columns), in thousands.

    The lookup function ignores the concrete RR settings, hence only a single
    (arbitrary) settings entry is used to derive the plot.
    """

    prototype = dict(
        name="Generated Mappings [k]",
        filename="lp_generated_mappings",
        vmin=0.0,
        vmax=2,
        colorbar_ticks=[0, 0.5, 1, 1.5, 2],
        cmap="Greens",
        plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
        lookup_function=lambda rr_seplp_result, rr_seplp_settings_list: (
            rr_seplp_result.lp_generated_columns / 1000.0)
    )

    @classmethod
    def get_all_rr_settings_list_with_names(cls):
        # bug fix: previously called get_list_of_vine_settings(); harmless only
        # because the lookup function above ignores the settings entirely
        rr_settings_list = get_list_of_rr_settings()
        # select arbitrary rr_settings to derive plots from
        return [([rr_settings_list[0]], "rr_seplp_ALL")]
class AbstractHeatmapSpecificationVineVsRandRoundFactory(object):
    """Base factory for heatmap specifications comparing ViNE against the
    separation-LP randomized rounding algorithm (plus latency-study variants)."""

    prototype = dict()

    @classmethod
    def get_hs(cls, vine_settings_list, randround_settings_list, name):
        """Return a prototype copy whose lookup is bound to the given settings lists."""
        spec = copy.deepcopy(cls.prototype)
        spec['lookup_function'] = lambda x: cls.prototype['lookup_function'](
            x[0], x[1], vine_settings_list, randround_settings_list)
        spec['alg_variant'] = name
        return spec

    @classmethod
    def get_specific_comparison_settings_list_with_names(cls):
        """Return (vine settings, rr settings, name) triples for the comparison plots."""
        vine_settings_list = get_list_of_vine_settings()
        rr_settings_list = get_list_of_rr_settings()
        result = [(vine_settings_list, rr_settings_list, "vine_ALL_vs_randround_ALL")]
        # split the vine settings by edge embedding model
        vine_settings_list_mcf = []
        vine_settings_list_sp = []
        for vine_settings in vine_settings_list:
            if vine_settings.edge_embedding_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE:
                vine_settings_list_mcf.append(vine_settings)
            else:
                vine_settings_list_sp.append(vine_settings)
        result.append((vine_settings_list_sp, rr_settings_list, "vine_SP_vs_randround_ALL"))
        #result.append((vine_settings_list_mcf, rr_settings_list, "vine_MCF_vs_randround_ALL"))
        return result

    @classmethod
    def get_all_hs(cls):
        """Instantiate one heatmap specification per comparison variant."""
        return [cls.get_hs(vine_list, rr_list, comparison_name)
                for vine_list, rr_list, comparison_name
                in cls.get_specific_comparison_settings_list_with_names()]

    @classmethod
    def get_all_hs_both_rr(cls):
        """Latency study: compare RR results (with latencies) against the RR baseline."""
        return [cls.get_hs(get_list_of_rr_settings(), get_list_of_rr_settings(),
                           'with_latencies_vs_baseline')]
def _comparison_profit_best_relative(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Relative profit gain (percent) of the best RR variant over the best ViNE variant."""
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    return 100 * (best_rr - best_vine) / best_vine
def _comparison_profit_best_relative_latency_study(baseline_result, with_latency_result, baseline_settings_list, with_latency_settings_list):
    """Best profit with latencies, expressed as a percentage of the best baseline profit."""
    best_baseline = max(baseline_result.profits[settings].max
                        for settings in baseline_settings_list)
    best_with_latency = max(with_latency_result.profits[settings].max
                            for settings in with_latency_settings_list)
    return 100 * best_with_latency / best_baseline
def _comparison_profit_absolute(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Absolute profit difference: best RR variant minus best ViNE variant."""
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    return best_rr - best_vine
def _comparison_profit_absolute_latency_study(baseline_result, with_latency_result, baseline_settings_list, with_latency_settings_list):
    """Absolute profit difference: best baseline profit minus best profit with latencies."""
    best_baseline = max(baseline_result.profits[settings].max
                        for settings in baseline_settings_list)
    best_with_latency = max(with_latency_result.profits[settings].max
                            for settings in with_latency_settings_list)
    return best_baseline - best_with_latency
def _comparison_profit_qualitative_randround_5perc(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Return 100 if the best RR profit beats the best ViNE profit by >= 5 percent, else 0."""
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    return 100 if (best_rr - best_vine) / best_vine >= 0.05 else 0
def _comparison_profit_qualitative_vine_5perc(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Return 100 if the best ViNE profit beats the best RR profit by >= 5 percent, else 0."""
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    return 100 if (best_vine - best_rr) / best_rr >= 0.05 else 0
def _profit_relative_to_lp_bound_rr(rr_result, rr_settings_list):
    """Best RR profit as a percentage of the LP upper bound."""
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    return 100.0 * (best_rr / rr_result.lp_profit)
def _profit_relative_to_lp_bound_vine(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Best ViNE profit as a percentage of the LP upper bound (taken from the RR result)."""
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    return 100.0 * (best_vine / rr_result.lp_profit)
def _relative_profit_difference_to_lp_bound(vine_result, rr_result, vine_settings_list, rr_settings_list):
    """Difference of the RR and ViNE profits, each expressed as percent of the LP bound."""
    lp_bound = rr_result.lp_profit
    best_rr = max(rr_result.profits[settings].max for settings in rr_settings_list)
    best_vine = max(vine_result[settings][0].profit.max for settings in vine_settings_list)
    return 100.0 * (best_rr / lp_bound) - 100.0 * (best_vine / lp_bound)
class HSF_Comp_BestProfit(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: relative profit of randomized rounding vs. ViNE (percent)."""

    prototype = {
        "name": "Relative Profit: rand round vs ViNE",
        "filename": "comparison_vine_rand_round",
        "vmin": -100,
        "vmax": 100,
        "colorbar_ticks": list(range(-100, 101, 33)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _comparison_profit_best_relative(vine_result, rr_result,
                                             vine_settings_list, rr_settings_list)),
    }
class HSF_Comp_BestProfitLatencyStudy(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap (latency study): best profit with latencies as percent of the baseline."""

    prototype = {
        "name": "Relative Profit: % of Baseline",
        "filename": "comparison_baseline_with_latencies",
        "vmin": 0,
        "vmax": 120,
        "colorbar_ticks": list(range(0, 121, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonLatencyBaseline,
        "lookup_function": lambda baseline_result, with_latency_result, baseline_settings_list, with_latency_settings_list: (
            _comparison_profit_best_relative_latency_study(
                baseline_result, with_latency_result,
                baseline_settings_list, with_latency_settings_list)),
    }
class HSF_Comp_AbsoluteLatencyStudy(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap (latency study): absolute profit difference, baseline vs. with latencies."""

    prototype = {
        "name": "Absolute Profit: With Latencies vs. Baseline",
        "filename": "absolute_profit_comp",
        "vmin": 0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonLatencyBaseline,
        "lookup_function": lambda baseline_result, with_latency_result, baseline_settings_list, with_latency_settings_list: (
            _comparison_profit_absolute_latency_study(
                baseline_result, with_latency_result,
                baseline_settings_list, with_latency_settings_list)),
    }
class HSF_Comp_QualProfitDiff_RR(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: indicator (0/100) whether RR beats ViNE by more than 5 percent."""

    prototype = {
        "name": "Qualitative Difference > 5%: Rand Round",
        "filename": "qual_diff_5perc_rand_round",
        "vmin": 0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _comparison_profit_qualitative_randround_5perc(vine_result, rr_result,
                                                           vine_settings_list, rr_settings_list)),
    }
class HSF_Comp_QualProfitDiff_Vine(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: indicator (0/100) whether ViNE beats RR by more than 5 percent."""

    prototype = {
        "name": "Qualitative Difference > 5%: ViNE",
        "filename": "qual_diff_5perc_vine",
        "vmin": 0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _comparison_profit_qualitative_vine_5perc(vine_result, rr_result,
                                                      vine_settings_list, rr_settings_list)),
    }
class HSF_Comp_RelProfitToLPBound_RR(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: best RR profit relative to the LP upper bound (percent)."""

    prototype = {
        "name": "Rel. Profit: Rand Round",
        "filename": "rel_profit_lpbound_rr",
        "vmin": 0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _profit_relative_to_lp_bound_rr(rr_result, rr_settings_list)),
    }
class HSF_Comp_RelProfitToLPBound_Vine(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: best ViNE profit relative to the LP upper bound (percent)."""

    prototype = {
        "name": "Rel. Profit: WiNE",
        "filename": "rel_profit_lpbound_vine",
        "vmin": 0,
        "vmax": 100,
        "colorbar_ticks": list(range(0, 101, 20)),
        "cmap": "Reds",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _profit_relative_to_lp_bound_vine(vine_result, rr_result,
                                              vine_settings_list, rr_settings_list)),
    }
class HSF_Comp_RelProfitToLPBound_RR_minus_Vine(AbstractHeatmapSpecificationVineVsRandRoundFactory):
    """Heatmap: relative improvement of RR over WiNE, normalized by the LP upper bound."""

    prototype = {
        # raw string: the title contains LaTeX backslash commands (same bytes as before)
        "name": r"Rel. Improv.: ($\mathsf{RR}_{\mathsf{best}}$ - $\mathsf{WiNE}_{\mathsf{best}}$)/$\mathsf{LP}_{\mathsf{UB}}$ [%]",
        "filename": "rel_profit_difference_lpbound",
        "vmin": -25,
        "vmax": 25,
        "colorbar_ticks": list(range(-24, 25, 6)),
        "cmap": "RdBu_r",
        "plot_type": HeatmapPlotType.ComparisonVineRandRound,
        "lookup_function": lambda vine_result, rr_result, vine_settings_list, rr_settings_list: (
            _relative_profit_difference_to_lp_bound(vine_result, rr_result,
                                                    vine_settings_list, rr_settings_list)),
    }
# All heatmap specifications of the general (non-latency) evaluation.
# NOTE(review): the misspelling "specfications" is kept — renaming the module-level
# variable would break any external code referencing it.
global_heatmap_specfications = HSF_Vine_Runtime.get_all_hs() + \
                               HSF_RR_MeanRoundingRuntime.get_all_hs() + \
                               HSF_RR_MeanDynVMPInitTimes.get_all_hs() + \
                               HSF_RR_GeneratedMappings.get_all_hs() + \
                               HSF_RR_Runtime.get_all_hs() + \
                               HSF_RR_LP_Runtime.get_all_hs() + \
                               HSF_Comp_BestProfit.get_all_hs() + \
                               HSF_Comp_QualProfitDiff_RR.get_all_hs() + \
                               HSF_Comp_QualProfitDiff_Vine.get_all_hs() + \
                               HSF_Comp_RelProfitToLPBound_RR.get_all_hs() + \
                               HSF_Comp_RelProfitToLPBound_Vine.get_all_hs() + \
                               HSF_Comp_RelProfitToLPBound_RR_minus_Vine.get_all_hs()

# Specifications selected for the latency study; alternative selections are kept commented out.
# latency_study_specs = HSF_RR_MeanRoundingRuntime.get_all_hs() + \
# latency_study_specs = HSF_RR_GeneratedMappings.get_all_hs() + \
latency_study_specs = HSF_RR_MeanDynVMPInitTimes.get_all_hs() + \
                      HSF_RR_LP_Runtime.get_all_hs() + \
                      HSF_RR_GeneratedMappings.get_all_hs() # + \
# HSF_RR_Runtime.get_all_hs() + \
# HSF_Comp_BestProfitLatencyStudy.get_all_hs_both_rr()
# HSF_RR_LP_Runtime.get_all_hs()
# HSF_RR_Runtime.get_all_hs() + \

# Comparison plots (baseline vs. with-latencies) of the latency study.
latency_study_specs_comparison = HSF_Comp_BestProfitLatencyStudy.get_all_hs_both_rr() + \
                                 HSF_Comp_AbsoluteLatencyStudy.get_all_hs_both_rr()
#+ \
# HSF_Comp_RelProfitToLPBound_RR.get_all_hs()

# Retag the latency-study specifications so they are grouped under their own plot type.
for spec in latency_study_specs:
    spec['plot_type'] = HeatmapPlotType.LatencyStudy

# Index the general specifications by plot type for lookup during plotting.
heatmap_specifications_per_type = {
    plot_type_item: [
        heatmap_specification for heatmap_specification in global_heatmap_specfications
        if heatmap_specification['plot_type'] == plot_type_item
    ]
    for plot_type_item in [HeatmapPlotType.ViNE,
                           HeatmapPlotType.RandRoundSepLPDynVMP,
                           HeatmapPlotType.ComparisonVineRandRound]
}
# The latency-study specifications were retagged above and are registered separately.
heatmap_specifications_per_type[HeatmapPlotType.LatencyStudy] = latency_study_specs
heatmap_specifications_per_type[HeatmapPlotType.ComparisonLatencyBaseline] = latency_study_specs_comparison
"""
Axes specifications used for the heatmap plots.
Each specification contains the following elements:
- x_axis_parameter: the parameter name on the x-axis
- y_axis_parameter: the parameter name on the y-axis
- x_axis_title: the legend of the x-axis
- y_axis_title: the legend of the y-axis
- foldername: the folder to store the respective plots in
"""
# Axes: node resource factor (x) vs. edge resource factor (y).
heatmap_axes_specification_resources = {
    "x_axis_parameter": "node_resource_factor",
    "y_axis_parameter": "edge_resource_factor",
    "x_axis_title": "Node Resource Factor",
    "y_axis_title": "Edge Resource Factor",
    "foldername": "AXES_RESOURCES",
}

# Axes: treewidth (x) vs. number of requests (y).
heatmap_axes_specification_requests_treewidth = {
    "x_axis_parameter": "treewidth",
    "y_axis_parameter": "number_of_requests",
    "x_axis_title": "Treewidth",
    "y_axis_title": "Number of Requests",
    "foldername": "AXES_TREEWIDTH_vs_NO_REQ",
}

# Axes: number of requests (x) vs. edge resource factor (y).
heatmap_axes_specification_requests_edge_load = {
    "x_axis_parameter": "number_of_requests",
    "y_axis_parameter": "edge_resource_factor",
    "x_axis_title": "Number of Requests",
    "y_axis_title": "Edge Resource Factor",
    "foldername": "AXES_NO_REQ_vs_EDGE_RF",
}

# Axes: number of requests (x) vs. node resource factor (y).
heatmap_axes_specification_requests_node_load = {
    "x_axis_parameter": "number_of_requests",
    "y_axis_parameter": "node_resource_factor",
    "x_axis_title": "Number of Requests",
    "y_axis_title": "Node Resource Factor",
    "foldername": "AXES_NO_REQ_vs_NODE_RF",
}
# Axes: treewidth (x) vs. edge resource factor (y).
heatmap_axes_specification_treewidth_edge_rf = dict(
    x_axis_parameter="treewidth",
    y_axis_parameter="edge_resource_factor",
    x_axis_title="Treewidth",
    y_axis_title="Edge Resource Factor",  # bug fix: was misspelled "Ede Resource Factor"
    foldername="AXES_TREEWIDTH_vs_EDGE_RF"
)
# Axes: edge resource factor (x) vs. node resource factor (y).
# NOTE(review): the variable name mentions "epsilon" but the axes are resource
# factors — kept as-is since callers reference this name.
heatmap_axes_specification_epsilon_nodes = dict(
    x_axis_parameter="edge_resource_factor",
    y_axis_parameter="node_resource_factor",
    x_axis_title="Edge Resource Factor",
    y_axis_title="Node Resource Factor",
    foldername="AXES_NODE_RES_vs_EDGE_RF"  # bug fix: folder was misspelled "AXES_RODE_RES_vs_EDGE_RF"
)
# Axes (latency study): approximation factor (x) vs. approximation limit (y).
heatmap_axes_specification_epsilon_limit = {
    "x_axis_parameter": "latency_approximation_factor",
    "y_axis_parameter": "latency_approximation_limit",
    "x_axis_title": "Epsilon",
    "y_axis_title": "Limit",
    "foldername": "AXES_EPSILON_LIMIT",
}

# Axes (latency study): approximation type (x) vs. approximation factor (y).
heatmap_axes_specification_type_epsilon = {
    "x_axis_parameter": "latency_approximation_type",
    "y_axis_parameter": "latency_approximation_factor",
    "x_axis_title": "Type",
    "y_axis_title": "Epsilon",
    "foldername": "AXES_TYPE_EPSILON",
}

# Axes (latency study): approximation type (x) vs. approximation limit (y).
heatmap_axes_specification_type_limit = {
    "x_axis_parameter": "latency_approximation_type",
    "y_axis_parameter": "latency_approximation_limit",
    "x_axis_title": "Type",
    "y_axis_title": "Limit",
    "foldername": "AXES_TYPE_LIMIT",
}

# Axes (latency study): approximation type (x) vs. edge resource factor (y).
heatmap_axes_specification_type_edgeres = {
    "x_axis_parameter": "latency_approximation_type",
    "y_axis_parameter": "edge_resource_factor",
    "x_axis_title": "Type",
    "y_axis_title": "Edge Resource Factor",
    "foldername": "AXES_TYPE_EDGE_RES",
}

# Axes (latency study): approximation type (x) vs. number of requests (y).
heatmap_axes_specification_type_requests = {
    "x_axis_parameter": "latency_approximation_type",
    "y_axis_parameter": "number_of_requests",
    "x_axis_title": "Type",
    "y_axis_title": "Number of Requests",
    "foldername": "AXES_TYPE_NUM_REQ",
}

# Axes (latency study): approximation type (x) vs. substrate topology (y).
heatmap_axes_specification_type_topology = {
    "x_axis_parameter": "latency_approximation_type",
    "y_axis_parameter": "topology",
    "x_axis_title": "Type",
    "y_axis_title": "Topology",
    "foldername": "AXES_TYPE_TOP",
}
# Axes combinations used for the general evaluation heatmaps.
global_heatmap_axes_specifications = (
    heatmap_axes_specification_requests_edge_load,
    heatmap_axes_specification_requests_treewidth,
    heatmap_axes_specification_resources,
    heatmap_axes_specification_requests_node_load,
    heatmap_axes_specification_treewidth_edge_rf,
)

# Axes combinations used for the latency study; unused alternatives remain commented out.
global_heatmap_axes_specifications_latency_study = (
    # heatmap_axes_specification_requests_edge_load,
    # heatmap_axes_specification_resources,
    # heatmap_axes_specification_requests_node_load,
    # heatmap_axes_specification_epsilon_limit,
    heatmap_axes_specification_type_epsilon,
    heatmap_axes_specification_type_limit,
    # heatmap_axes_specification_type_edgeres,
    # heatmap_axes_specification_type_requests,
    heatmap_axes_specification_type_topology,
)

# Axes combinations for the latency-study comparison plots.
global_heatmap_axes_specifications_latency_study_comparison = ( # has to involve 'type'
    heatmap_axes_specification_type_epsilon,
    heatmap_axes_specification_type_limit,
    # heatmap_axes_specification_type_edgeres,
    # heatmap_axes_specification_type_requests,
    # heatmap_axes_specification_type_topology,
    # heatmap_axes_specification_type_epsilon,
    # heatmap_axes_specification_resources,
)
def compute_average_node_load(result_summary):
    """Return the mean load over all node resources of the result summary.

    Node resources are the load entries whose first key component equals
    the resource type "universal".
    """
    # bug fixes in the log call: logger.warning instead of the deprecated
    # logger.warn, 'universal' typo corrected, missing space at the literal join
    logger.warning("In the function compute_average_node_load the single universal node type 'universal' is assumed. "
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = []
    for (x, y) in result_summary.load.keys():
        if x == "universal":
            cum_loads.append(result_summary.load[(x, y)])
    return np.mean(cum_loads)
def compute_average_edge_load(result_summary):
    """Return the mean load over all edge resources of the result summary.

    Edge resources are the load entries whose first key component differs
    from the node resource type "universal".
    """
    # bug fixes in the log call: logger.warning instead of the deprecated
    # logger.warn, 'universal' typo corrected, missing space at the literal join
    logger.warning("In the function compute_average_edge_load the single universal node type 'universal' is assumed. "
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = []
    for (x, y) in result_summary.load.keys():
        if x != "universal":
            cum_loads.append(result_summary.load[(x, y)])
    return np.mean(cum_loads)
def compute_max_node_load(result_summary):
    """Return the maximum load over all node resources of the result summary.

    Node resources are the load entries whose first key component equals
    the resource type "universal". Raises ValueError if there are none.
    """
    # bug fixes in the log call: logger.warning instead of the deprecated
    # logger.warn, 'universal' typo corrected, missing space at the literal join
    logger.warning("In the function compute_max_node_load the single universal node type 'universal' is assumed. "
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = []
    for (x, y) in result_summary.load.keys():
        if x == "universal":
            cum_loads.append(result_summary.load[(x, y)])
    return max(cum_loads)
def compute_max_edge_load(result_summary):
    """Return the maximum load over all edge resources of the result summary.

    Edge resources are the load entries whose first key component differs
    from the node resource type "universal". Raises ValueError if there are none.
    """
    # bug fixes in the log call: logger.warning instead of the deprecated
    # logger.warn, 'universal' typo corrected, missing space at the literal join
    logger.warning("In the function compute_max_edge_load the single universal node type 'universal' is assumed. "
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = []
    for (x, y) in result_summary.load.keys():
        if x != "universal":
            cum_loads.append(result_summary.load[(x, y)])
    return max(cum_loads)
def compute_avg_load(result_summary):
    """Return the mean load over all resources (nodes and edges) of the result summary."""
    all_loads = [result_summary.load[key] for key in result_summary.load.keys()]
    return np.mean(all_loads)
def compute_max_load(result_summary):
    """Return the maximum load over all resources (nodes and edges) of the result summary."""
    return max(result_summary.load[key] for key in result_summary.load.keys())
def get_title_for_filter_specifications(filter_specifications):
    """Build a multi-line plot title with one "parameter=value;" entry per filter."""
    entries = [spec['parameter'] + "=" + str(spec['value']) + "; "
               for spec in filter_specifications]
    # drop the trailing "; " of the final entry
    return "\n".join(entries)[:-2]
def extract_parameter_range(scenario_parameter_space, key):
    """Search the scenario parameter space for ``key`` and return (path, sorted values).

    If the scenario parameter container was merged with another, the parameter
    space is a list of dicts; all parameter subspaces are searched and all
    values matching the parameter are collected.
    """
    if not isinstance(scenario_parameter_space, list):
        scenario_parameter_space = [scenario_parameter_space]
    path = None
    values = set()
    for sps in scenario_parameter_space:
        # latency parameters live at the top level; all others are nested at depth >= 2
        min_depth = 0 if key[:7] == "latency" else 2
        x = _extract_parameter_range(sps, key, min_recursion_depth=min_depth)
        if x is None:
            # bug fix: parenthesized print works under both Python 2 and 3
            # (the original Python-2-only print statement is a syntax error on Python 3)
            print("Could not find key {}".format(key))
            continue
        new_path, new_values = x
        if path is None:
            path = new_path
        else:
            # this should usually not happen unless we merged incompatible parameter containers
            assert path == new_path
        values = values.union(new_values)
    return path, sorted(values)
def _extract_parameter_range(scenario_parameter_space_dict, key, min_recursion_depth=0):
    """Recursively locate ``key`` inside a nested parameter-space dict.

    Returns (path, values), where path is the list of keys (with a literal 0
    for single-element-list hops) leading to the match, or None if the key was
    not found. A match is only accepted once min_recursion_depth is exhausted.
    """
    if not isinstance(scenario_parameter_space_dict, dict):
        return None
    # bug fix: items() replaces the Python-2-only iteritems(); identical
    # iteration behavior on Python 2 and also works on Python 3
    for generator_name, value in scenario_parameter_space_dict.items():
        if generator_name == key and min_recursion_depth <= 0:
            return [key], value
        if isinstance(value, list):
            # only descend into unambiguous single-element lists
            if len(value) != 1:
                continue
            value = value[0]
            result = _extract_parameter_range(value, key, min_recursion_depth=min_recursion_depth - 1)
            if result is not None:
                path, values = result
                return [generator_name, 0] + path, values
        elif isinstance(value, dict):
            result = _extract_parameter_range(value, key, min_recursion_depth=min_recursion_depth - 1)
            if result is not None:
                path, values = result
                return [generator_name] + path, values
    return None
def _test_():
    """Ad-hoc debug harness for _extract_parameter_range on a sample parameter space.

    Looks up 'latency_approximation_limit' (absent in this sample, so the lookup
    yields None) in a hard-coded scenario parameter space. Kept only for manual
    debugging; several unused eval'd sample dictionaries from earlier debugging
    sessions were removed.
    """
    # NOTE(review): eval on a hard-coded literal is safe here, but ast.literal_eval
    # cannot be used because the literal contains set([...]) style constructor calls
    # in sibling samples; kept as eval for consistency.
    sps = eval("{'substrate_generation': [{'substrates': {'TopologyZooReader': {'node_type_distribution': [1.0], 'node_types': [('universal',)], 'node_capacity': [100.0], 'edge_capacity': [100.0], 'node_cost_factor': [1.0], 'include_latencies': [True], 'topology': ['Geant2012']}}}], 'node_placement_restriction_mapping': [{'neighbors': {'NeighborhoodSearchRestrictionGenerator': {'potential_nodes_factor': [0.25]}}}], 'profit_calculation': [{'optimal': {'OptimalEmbeddingProfitCalculator': {'timelimit': [90], 'profit_factor': [1.0]}}}], 'request_generation': [{'cactus': {'CactusRequestGenerator': {'layers': [3], 'normalize': [True], 'fix_root_mapping': [False], 'number_of_requests': [20], 'probability': [1.0], 'edge_resource_factor': [0.25, 0.5], 'arbitrary_edge_orientations': [True], 'max_number_of_nodes': [16], 'max_cycles': [9999], 'node_resource_factor': [0.2, 0.4], 'iterations': [10000], 'fix_leaf_mapping': [False], 'min_number_of_nodes': [3], 'branching_distribution': [(0.15, 0.5, 0.35)]}}}]}")
    key = 'latency_approximation_limit'
    x = _extract_parameter_range(sps, key, min_recursion_depth=0)
    print(x)


# Run the debug harness only when this module is executed directly; previously the
# call was unguarded and ran (and printed) on every import of this module.
if __name__ == "__main__":
    _test_()
def extract_latency_parameters(algorithm_parameter_list, filter_exec_params=None):
    """Collect the latency-approximation parameter values used by the executions.

    Iterates over every ALGORITHM_PARAMETERS entry and gathers the distinct values
    of the three latency keys. If filter_exec_params pins a key to a value, that
    single pinned value is used instead of the collected ones.

    Returns a dict mapping each latency key to a list of values.
    """
    latency_keys = (
        "latency_approximation_factor",
        "latency_approximation_limit",
        "latency_approximation_type",
    )
    collected = {latency_key: set() for latency_key in latency_keys}
    for parameter_entry in algorithm_parameter_list:
        algorithm_params = parameter_entry['ALGORITHM_PARAMETERS']
        for latency_key in latency_keys:
            if filter_exec_params is not None and latency_key in filter_exec_params:
                # pinned by the caller: overrides whatever the executions used
                collected[latency_key] = [filter_exec_params[latency_key]]
            else:
                collected[latency_key].add(algorithm_params[latency_key])
    # normalize the sets (and pinned single-element lists) to plain lists;
    # items() replaces the Python-2-only iteritems() with identical behavior
    return {latency_key: list(entry) for latency_key, entry in collected.items()}
def find_scenarios_for_params(solution_container, algorithm_id, lat_params):
    """(UNFINISHED) Intended to map each latency-parameter value to the scenario ids using it.

    WARNING: abandoned work-in-progress debug code. The body only builds the empty
    result skeleton, prints the first scenario id and then terminates the whole
    process via the unconditional exit() at the end of the loop body. Do not call
    this function in production code.
    """
    # result skeleton: latency parameter name -> value -> set of scenario ids
    lat_scenarios = dict()
    for key, valueList in lat_params.iteritems():
        valueDict = {}
        for value in valueList:
            valueDict[value] = set()
        lat_scenarios[key] = valueDict
    container = solution_container.algorithm_scenario_solution_dictionary[algorithm_id]
    # NOTE(review): the two lookups below are computed but never used by the
    # (unfinished) logic that follows.
    exec_param_container = solution_container.execution_parameter_container.get_execution_ids(ALG_ID=algorithm_id)
    exec_id_lookup = solution_container.execution_parameter_container.reverse_lookup['RandRoundSepLPOptDynVMPCollection']['ALGORITHM_PARAMETERS']
    for scenario_id in range(len(container)):
        print scenario_id
        # print ['latency_approximation_factor']
        # exit()
        #
        #
        # scenario_parameters = solution_container.retrieve_scenario_parameters_for_index(scenario_id)
        # print scenario_parameters
        #
        # for execution_id in exec_param_container.get_execution_ids(ALG_ID=algorithm_id):
        #
        #     container = exec_param_container.algorithm_parameter_list[execution_id]['ALGORITHM_PARAMETERS']
        #
        #     print container
        #     exit()
        #
        #
        #     # if exec passt zu scenario id:
        #
        #     lat_scenarios[key][container[key]].add(scenario_id)
        # HAZARD: terminates the interpreter on the first iteration (debug leftover)
        exit()
    return lat_scenarios
def extract_generation_parameters(scenario_parameter_dict, scenario_id):
    """Return all generator paths under which scenario_id appears, or None.

    Recursively walks the nested scenario parameter dict. A path ends at the first
    non-"all" set that contains scenario_id. Single-element lists are transparently
    unwrapped; longer lists are skipped as ambiguous.

    Returns a list of paths (each a list of keys) or None if the scenario id does
    not occur anywhere below this dict.
    """
    if not isinstance(scenario_parameter_dict, dict):
        return None
    results = []
    # items() replaces the Python-2-only iteritems() with identical behavior
    for generator_name, value in scenario_parameter_dict.items():
        if isinstance(value, set) and generator_name != "all" and scenario_id in value:
            return [[generator_name]]
        if isinstance(value, list):
            # only unambiguous single-element lists are descended into
            if len(value) != 1:
                continue
            value = value[0]
        elif not isinstance(value, dict):
            continue
        # the duplicated list/dict recursion branches were merged: after list
        # unwrapping both cases recurse identically on the child value
        sub_results = extract_generation_parameters(value, scenario_id)
        if sub_results is not None:
            for atomic_result in sub_results:
                results.append([generator_name] + atomic_result)
    if not results:
        return None
    return results
def lookup_scenarios_having_specific_values(scenario_parameter_space_dict, path, value):
    """Walk *path* through the nested scenario-parameter dict and return the set stored under *value*.

    String path components select the corresponding sub-dict. A literal 0 -- the
    list-index placeholder produced by _extract_parameter_range -- has no
    counterpart in the scenario parameter *dict* (which contains no lists) and is
    therefore skipped.

    Raises RuntimeError for any other path component; previously such a component
    made the loop spin forever because neither branch popped it.
    """
    remaining_path = path[:]
    current_dict = scenario_parameter_space_dict
    while len(remaining_path) > 0:
        step = remaining_path.pop(0)
        if isinstance(step, basestring):
            current_dict = current_dict[step]
        elif step == 0:
            # placeholder index from the parameter-space path; no list level here
            pass
        else:
            raise RuntimeError("Cannot handle path component {} in path {}".format(step, path))
    return current_dict[value]
def lookup_scenario_parameter_room_dicts_on_path(scenario_parameter_space_dict, path):
    """Return the list of nested containers visited while following *path*.

    The returned list contains, in order, each dict/list as it looked *before*
    the corresponding path component was applied (the starting container is
    always included first). Raises RuntimeError if a path component is neither
    a string nor an integer.
    """
    remaining_path = path[:]
    current_container = scenario_parameter_space_dict
    visited_containers = []
    while len(remaining_path) > 0:
        visited_containers.append(current_container)
        step = remaining_path.pop(0)
        # strings index dicts, ints index lists; anything else is malformed
        if isinstance(step, (basestring, int)):
            current_container = current_container[step]
        else:
            raise RuntimeError("Could not lookup dicts.")
    return visited_containers
def load_reduced_pickle(reduced_pickle):
    """Deserialize and return the object stored in the given pickle file path."""
    with open(reduced_pickle, "rb") as pickle_file:
        return pickle.load(pickle_file)
class AbstractPlotter(object):
    ''' Abstract Plotter interface providing functionality used by the majority of plotting classes of this module.
    '''
    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True,
                 filter_exec_params=None,
                 ):
        """Store output settings / solution storage and merge latency parameters into the parameter room.

        filter_exec_params: optional dict pinning latency execution parameters to
        fixed values (forwarded to extract_latency_parameters).
        """
        self.output_path = output_path
        self.output_filetype = output_filetype
        self.scenario_solution_storage = scenario_solution_storage
        self.algorithm_id = algorithm_id
        self.execution_id = execution_id
        self.scenario_parameter_dict = self.scenario_solution_storage.scenario_parameter_container.scenario_parameter_dict
        # NOTE(review): this assignment is a dead store -- it is overwritten below
        # once the latency parameters have been merged into the parameter room.
        self.scenarioparameter_room = self.scenario_solution_storage.scenario_parameter_container.scenarioparameter_room
        self.all_scenario_ids = set(scenario_solution_storage.algorithm_scenario_solution_dictionary[self.algorithm_id].keys())
        # collect the latency-approximation values used by the executions ...
        lat_params = extract_latency_parameters(
            scenario_solution_storage.execution_parameter_container.algorithm_parameter_list,
            filter_exec_params
        )
        # ... and expose them as an additional pseudo scenario parameter
        # 'latency_approx' so the axis/filter machinery can treat them like
        # regular scenario parameters
        combined_dict = dict(self.scenario_solution_storage.scenario_parameter_container.scenarioparameter_room)
        combined_dict.update({'latency_approx': [lat_params]})
        self.scenarioparameter_room = combined_dict
        # lat_scenario = find_scenarios_for_params(self.scenario_solution_storage, algorithm_id, lat_params)
        # scen_param_dict = dict(self.scenario_solution_storage.scenario_parameter_container.scenario_parameter_dict)
        # scen_param_dict.update({'latency_approx': lat_scenario})
        # self.scenario_parameter_dict = scen_param_dict
        self.show_plot = show_plot
        self.save_plot = save_plot
        self.overwrite_existing_files = overwrite_existing_files
        # falsy check: an explicitly passed empty container is also replaced by set()
        if not forbidden_scenario_ids:
            self.forbidden_scenario_ids = set()
        else:
            self.forbidden_scenario_ids = forbidden_scenario_ids
        self.paper_mode = paper_mode
    def _construct_output_path_and_filename(self, title, filter_specifications=None):
        """Return (output_path, filename) for a general plot, encoding date, filetype and filters."""
        filter_spec_path = ""
        filter_filename = "no_filter.{}".format(self.output_filetype)
        if filter_specifications:
            filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
        base = os.path.normpath(self.output_path)
        date = strftime("%Y-%m-%d", gmtime())
        output_path = os.path.join(base, date, self.output_filetype, "general_plots", filter_spec_path)
        filename = os.path.join(output_path, title + "_" + filter_filename)
        return output_path, filename
    def _construct_path_and_filename_for_filter_spec(self, filter_specifications):
        """Return (sub-path, filename) derived from the parameter=value filter pairs."""
        filter_path = ""
        filter_filename = ""
        for spec in filter_specifications:
            filter_path = os.path.join(filter_path, (spec['parameter'] + "_" + str(spec['value'])))
            filter_filename += spec['parameter'] + "_" + str(spec['value']) + "_"
        # drop the trailing underscore and append the output file extension
        filter_filename = filter_filename[:-1] + "." + self.output_filetype
        return filter_path, filter_filename
    def _obtain_scenarios_based_on_filters(self, filter_specifications=None):
        """Return the scenario ids matching all filter specifications (all ids if none given)."""
        allowed_scenario_ids = set(self.all_scenario_ids)
        sps = self.scenarioparameter_room
        spd = self.scenario_parameter_dict
        if filter_specifications:
            for filter_specification in filter_specifications:
                filter_path, _ = extract_parameter_range(sps, filter_specification['parameter'])
                filter_indices = lookup_scenarios_having_specific_values(spd, filter_path,
                                                                         filter_specification['value'])
                allowed_scenario_ids = allowed_scenario_ids & filter_indices
        return allowed_scenario_ids
    def _obtain_scenarios_based_on_axis(self, axis_path, axis_value):
        """Return the scenario ids having axis_value at axis_path."""
        spd = self.scenario_parameter_dict
        return lookup_scenarios_having_specific_values(spd, axis_path, axis_value)
    def _show_and_or_save_plots(self, output_path, filename, perform_tight_layout=True):
        """Save and/or show the current matplotlib figure (per instance flags), then close it."""
        if perform_tight_layout:
            plt.tight_layout()
        if self.save_plot:
            if not os.path.exists(output_path):
                os.makedirs(output_path)
            print "saving plot: {}".format(filename)
            plt.savefig(filename)
        if self.show_plot:
            plt.show()
        plt.close()
    def plot_figure(self, filter_specifications):
        """Abstract hook: subclasses implement the actual plotting."""
        raise RuntimeError("This is an abstract method")
class SingleHeatmapPlotter(AbstractPlotter):
def __init__(self,
output_path,
output_filetype,
scenario_solution_storage,
algorithm_id,
execution_id,
heatmap_plot_type,
filter_type=None,
filter_execution_params=None,
list_of_axes_specifications=global_heatmap_axes_specifications,
list_of_metric_specifications=None,
show_plot=False,
save_plot=True,
overwrite_existing_files=False,
forbidden_scenario_ids=None,
paper_mode=True
):
super(SingleHeatmapPlotter, self).__init__(output_path, output_filetype, scenario_solution_storage,
algorithm_id, execution_id, show_plot, save_plot,
overwrite_existing_files, forbidden_scenario_ids, paper_mode, filter_execution_params)
if heatmap_plot_type is None or heatmap_plot_type not in HeatmapPlotType.VALUE_RANGE:
raise RuntimeError("heatmap_plot_type {} is not a valid input. Must be of type HeatmapPlotType.".format(heatmap_plot_type))
self.heatmap_plot_type = heatmap_plot_type
if not list_of_axes_specifications:
raise RuntimeError("Axes need to be provided.")
self.list_of_axes_specifications = list_of_axes_specifications
if not list_of_metric_specifications:
self.list_of_metric_specifications = heatmap_specifications_per_type[self.heatmap_plot_type]
else:
for metric_specification in list_of_metric_specifications:
if metric_specification.plot_type != self.heatmap_plot_type:
raise RuntimeError("The metric specification {} does not agree with the plot type {}.".format(metric_specification, self.heatmap_plot_type))
self.list_of_metric_specifications = list_of_metric_specifications
self.exec_id_lookup = self.scenario_solution_storage.execution_parameter_container.reverse_lookup[algorithm_id][
'ALGORITHM_PARAMETERS']
self.execution_id_filter = self.scenario_solution_storage.execution_parameter_container.get_execution_ids(ALG_ID=algorithm_id)
if filter_type is not None and filter_type in ['no latencies', 'strict', 'flex']:
self.execution_id_filter = self.exec_id_lookup['latency_approximation_type'][filter_type]
if filter_execution_params is not None:
for key, value in filter_execution_params.iteritems():
try:
filter_key = self.exec_id_lookup[key][value]
self.execution_id_filter = self.execution_id_filter & filter_key
except:
print "Key Error\n", self.exec_id_lookup[key]
exit(1)
print "Using Exec ID filter: ", self.execution_id_filter
def _construct_output_path_and_filename(self, metric_specification,
heatmap_axes_specification,
filter_specifications=None):
filter_spec_path = ""
filter_filename = "no_filter.{}".format(self.output_filetype)
if filter_specifications:
filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
base = os.path.normpath(self.output_path)
date = strftime("%Y-%m-%d", gmtime())
axes_foldername = heatmap_axes_specification['foldername']
sub_param_string = metric_specification['alg_variant']
if sub_param_string is not None:
output_path = os.path.join(base, date, self.output_filetype, axes_foldername, sub_param_string, filter_spec_path)
else:
output_path = os.path.join(base, date, self.output_filetype, axes_foldername, filter_spec_path)
fname = "__".join(str(x) for x in [
metric_specification['filename'],
filter_filename,
])
filename = os.path.join(output_path, fname)
return output_path, filename
def plot_figure(self, filter_specifications):
for axes_specification in self.list_of_axes_specifications:
for metric_specfication in self.list_of_metric_specifications:
self.plot_single_heatmap_general(metric_specfication, axes_specification, filter_specifications)
def _read_from_solution_dicts(self, solution_dicts, exec_id):
return
def _lookup_solutions(self, scenario_ids):
solution_dicts = [self.scenario_solution_storage.get_solutions_by_scenario_index(x) for x in scenario_ids]
result = [x[self.algorithm_id][self.execution_id] for x in solution_dicts]
#todo check whether this is okay...
# if self.heatmap_plot_type == HeatmapPlotType.ViNE:
# # result should be a list of dicts mapping vine_settings to lists of ReducedOfflineViNEResultCollection instances
# if result and self.algorithm_sub_parameter not in result[0]:
# return None
# elif self.heatmap_plot_type == HeatmapPlotType.RandRoundSepLPDynVMP:
# # result should be a list of ReducedRandRoundSepLPOptDynVMPCollectionResult instances
# if result and self.algorithm_sub_parameter not in result[0].profits:
# return None
return result
def _lookup_solutions_by_execution(self, scenario_ids, x_key, x_val, y_key, y_val, solution_container=None):
if solution_container is None:
solution_container = self.scenario_solution_storage
try:
x_axis_exec_ids = self.exec_id_lookup[x_key][x_val]
except KeyError:
x_axis_exec_ids = solution_container.execution_parameter_container.get_execution_ids(ALG_ID=self.algorithm_id)
path_x_axis, _ = extract_parameter_range(self.scenario_parameter_dict, x_key)
x_axis_scenarios = lookup_scenarios_having_specific_values(self.scenario_parameter_dict, path_x_axis, x_val)
scenario_ids = scenario_ids & x_axis_scenarios
try:
y_axis_exec_ids = self.exec_id_lookup[y_key][y_val]
except KeyError:
y_axis_exec_ids = solution_container.execution_parameter_container.get_execution_ids(ALG_ID=self.algorithm_id)
path_y_axis, _ = extract_parameter_range(self.scenario_parameter_dict, y_key)
y_axis_scenarios = lookup_scenarios_having_specific_values(self.scenario_parameter_dict, path_y_axis, y_val)
scenario_ids = scenario_ids & y_axis_scenarios
exec_ids_to_consider = x_axis_exec_ids & y_axis_exec_ids & self.execution_id_filter
# except KeyError as e:
# print "key not found, ", e
# return self._lookup_solutions(scenario_ids)
print "Using Exec_IDS: ", exec_ids_to_consider
print "Using Scenarios: ", scenario_ids
solution_dicts = [solution_container.get_solutions_by_scenario_index(x) for x in scenario_ids]
results = [solution[self.algorithm_id][exec_id] for solution in solution_dicts for exec_id in exec_ids_to_consider]
return results
def plot_single_heatmap_general(self,
heatmap_metric_specification,
heatmap_axes_specification,
filter_specifications=None):
# data extraction
sps = self.scenarioparameter_room
spd = self.scenario_parameter_dict
output_path, filename = self._construct_output_path_and_filename(heatmap_metric_specification,
heatmap_axes_specification,
filter_specifications)
logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
if not self.overwrite_existing_files and os.path.exists(filename):
logger.info("Skipping generation of {} as this file already exists".format(filename))
return
# check if filter specification conflicts with axes specification
if filter_specifications is not None:
for filter_specification in filter_specifications:
if (heatmap_axes_specification['x_axis_parameter'] == filter_specification['parameter'] or
heatmap_axes_specification['y_axis_parameter'] == filter_specification['parameter']):
logger.debug("Skipping generation of {} as the filter specification conflicts with the axes specification.")
return
path_x_axis, xaxis_parameters = extract_parameter_range(
sps,
heatmap_axes_specification['x_axis_parameter'],
)
path_y_axis, yaxis_parameters = extract_parameter_range(
sps,
heatmap_axes_specification['y_axis_parameter'],
)
# for heatmap plot
xaxis_parameters.sort()
yaxis_parameters.sort()
# all heatmap values will be stored in X
X = np.zeros((len(yaxis_parameters), len(xaxis_parameters)))
column_labels = yaxis_parameters
row_labels = xaxis_parameters
min_number_of_observed_values = 10000000000000
max_number_of_observed_values = 0
observed_values = np.empty(0)
for x_index, x_val in enumerate(xaxis_parameters):
# all scenario indices which has x_val as xaxis parameter (e.g. node_resource_factor = 0.5
if path_x_axis[-1][:7] != "latency":
scenario_ids_matching_x_axis = lookup_scenarios_having_specific_values(spd, path_x_axis, x_val)
else:
scenario_ids_matching_x_axis = self.all_scenario_ids
# if self.heatmap_plot_type not in [HeatmapPlotType.LatencyStudy, HeatmapPlotType.ComparisonLatencyBaseline] \
for y_index, y_val in enumerate(yaxis_parameters):
if path_x_axis[-1][:7] != "latency":
scenario_ids_matching_y_axis = lookup_scenarios_having_specific_values(spd, path_y_axis, y_val)
else:
scenario_ids_matching_y_axis = self.all_scenario_ids
# if self.heatmap_plot_type not in [HeatmapPlotType.LatencyStudy, HeatmapPlotType.ComparisonLatencyBaseline] \
# else set([i for i in range(len(self.scenario_solution_storage.algorithm_scenario_solution_dictionary[self.algorithm_id]))])
filter_indices = self._obtain_scenarios_based_on_filters(filter_specifications)
scenario_ids_to_consider = (scenario_ids_matching_x_axis &
scenario_ids_matching_y_axis &
filter_indices) - self.forbidden_scenario_ids
if self.heatmap_plot_type in [HeatmapPlotType.LatencyStudy, HeatmapPlotType.ComparisonLatencyBaseline]:
solutions = self._lookup_solutions_by_execution(scenario_ids_to_consider, heatmap_axes_specification['x_axis_parameter'], x_val, heatmap_axes_specification['y_axis_parameter'], y_val)
else:
solutions = self._lookup_solutions(scenario_ids_to_consider)
# for solution in solutions:
# print solution
values = [heatmap_metric_specification['lookup_function'](solution)
for solution in solutions]
if 'metric_filter' in heatmap_metric_specification:
values = [value for value in values if heatmap_metric_specification['metric_filter'](value)]
observed_values = np.append(observed_values, values)
if len(values) < min_number_of_observed_values:
min_number_of_observed_values = len(values)
if len(values) > max_number_of_observed_values:
max_number_of_observed_values = len(values)
logger.debug("values are {}".format(values))
m = np.nanmean(values)
logger.debug("mean is {}".format(m))
if 'rounding_function' in heatmap_metric_specification:
rounded_m = heatmap_metric_specification['rounding_function'](m)
else:
rounded_m = float("{0:.1f}".format(round(m, 2)))
X[y_index, x_index] = rounded_m
if min_number_of_observed_values == max_number_of_observed_values:
solution_count_string = "{} values per square".format(min_number_of_observed_values)
else:
solution_count_string = "between {} and {} values per square".format(min_number_of_observed_values,
max_number_of_observed_values)
fig, ax = plt.subplots(figsize=FIGSIZE)
if self.paper_mode:
ax.set_title(heatmap_metric_specification['name'], fontsize=17)
else:
title = heatmap_metric_specification['name'] + "\n"
title += heatmap_metric_specification['alg_variant'] + "\n"
if filter_specifications:
title += get_title_for_filter_specifications(filter_specifications) + "\n"
title += solution_count_string + "\n"
title += "min: {:.4f}; mean: {:.4f}; max: {:.4f}".format(np.nanmin(observed_values),
np.nanmean(observed_values),
np.nanmax(observed_values))
ax.set_title(title)
heatmap = ax.pcolor(X,
cmap=heatmap_metric_specification['cmap'],
vmin=heatmap_metric_specification['vmin'],
vmax=heatmap_metric_specification['vmax'])
for x_index in range(X.shape[1]):
for y_index in range(X.shape[0]):
plt.text(x_index + .5,
y_index + .45,
X[y_index, x_index],
verticalalignment="center",
horizontalalignment="center",
fontsize=17.5,
fontname="Courier New",
# family="monospace",
color='w',
path_effects=[PathEffects.withStroke(linewidth=4, foreground="k")]
)
if not self.paper_mode:
fig.colorbar(heatmap, label=heatmap_metric_specification['name'] + ' - mean in blue')
else:
ticks = heatmap_metric_specification['colorbar_ticks']
tick_labels = [str(tick).ljust(3) for tick in ticks]
cbar = fig.colorbar(heatmap)
cbar.set_ticks(ticks)
cbar.set_ticklabels(tick_labels)
# for label in cbar.ax.get_yticklabels():
# label.set_fontproperties(font_manager.FontProperties(family="Courier New",weight='bold'))
cbar.ax.tick_params(labelsize=15.5)
ax.set_yticks(np.arange(X.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(X.shape[1]) + 0.5, minor=False)
ax.set_xticklabels(row_labels, minor=False, fontsize=15.5)
ax.set_xlabel(heatmap_axes_specification['x_axis_title'], fontsize=16)
ax.set_ylabel(heatmap_axes_specification['y_axis_title'], fontsize=16)
ax.set_yticklabels(column_labels, minor=False, fontsize=15.5)
self._show_and_or_save_plots(output_path, filename)
plt.close(fig)
def _construct_filter_specs(scenario_parameter_space_dict, parameter_filter_keys, maxdepth=3):
    """Enumerate all filter specifications over the given scenario parameters.

    For every non-empty subset of parameter_filter_keys of size at most
    maxdepth, and for every combination of values those parameters take in the
    scenario parameter space, one filter specification is produced: a list of
    {'parameter': ..., 'value': ...} dicts. The returned list starts with a
    single None entry, which represents the unfiltered ("no restriction") case.
    """
    values_by_parameter = {}
    for key in parameter_filter_keys:
        _, value_range = extract_parameter_range(scenario_parameter_space_dict,
                                                 key)
        values_by_parameter[key] = value_range

    specs = [None]  # None == "apply no filter at all"
    for depth in range(1, maxdepth + 1):
        for parameter_combo in combinations(values_by_parameter, depth):
            value_ranges = [values_by_parameter[p] for p in parameter_combo]
            # cartesian product over the selected parameters' value ranges
            for assignment in product(*value_ranges):
                specs.append([{'parameter': p, 'value': v}
                              for (p, v) in zip(parameter_combo, assignment)])
    return specs
class ComparisonHeatmapPlotter(SingleHeatmapPlotter):
    """Heatmap plotter contrasting ViNE solutions with randomized-rounding ones.

    The primary (inherited) solution storage holds the ViNE results, while a
    second storage with its own algorithm/execution id holds the randomized
    rounding results; `_lookup_solutions` pairs them per scenario.
    """

    def __init__(self,
                 output_path,
                 output_filetype,
                 vine_solution_storage,
                 vine_algorithm_id,
                 vine_execution_id,
                 randround_scenario_solution_storage,
                 randround_algorithm_id,
                 randround_execution_id,
                 heatmap_plot_type,
                 list_of_axes_specifications=global_heatmap_axes_specifications,
                 list_of_metric_specifications=None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        super(ComparisonHeatmapPlotter, self).__init__(output_path,
                                                       output_filetype,
                                                       vine_solution_storage,
                                                       vine_algorithm_id,
                                                       vine_execution_id,
                                                       heatmap_plot_type,
                                                       list_of_axes_specifications,
                                                       list_of_metric_specifications,
                                                       show_plot,
                                                       save_plot,
                                                       overwrite_existing_files,
                                                       forbidden_scenario_ids,
                                                       paper_mode)
        self.randround_scenario_solution_storage = randround_scenario_solution_storage
        self.randround_algorithm_id = randround_algorithm_id
        self.randround_execution_id = randround_execution_id

        # this plotter only supports the two comparison plot types
        if heatmap_plot_type not in (HeatmapPlotType.ComparisonVineRandRound,
                                     HeatmapPlotType.ComparisonLatencyBaseline):
            raise RuntimeError("Only comparison heatmap plots are allowed")

    def _lookup_solutions(self, scenario_ids):
        """Return a list of (vine_solution, randround_solution) pairs, one per scenario id."""
        vine_storage = self.scenario_solution_storage
        rr_storage = self.randround_scenario_solution_storage
        pairs = []
        for scenario_id in scenario_ids:
            vine_solution = vine_storage.get_solutions_by_scenario_index(scenario_id)[self.algorithm_id][self.execution_id]
            rr_solution = rr_storage.get_solutions_by_scenario_index(scenario_id)[self.randround_algorithm_id][self.randround_execution_id]
            pairs.append((vine_solution, rr_solution))
        return pairs
class LatencyStudyPlotter(SingleHeatmapPlotter):
    """Heatmap plotter for the latency study.

    Plots results of the latency-aware algorithm (with_latencies_solution_storage)
    and, optionally, compares them against a latency-oblivious baseline
    (baseline_solution_storage). When a baseline is given (and comparison is
    False), a pseudo value 'no latencies' is injected into the
    latency_approximation_type axis so the baseline appears as an extra
    row/column in the heatmaps.
    """

    def __init__(self,
                 output_path,
                 output_filetype,
                 baseline_solution_storage,
                 with_latencies_solution_storage,
                 algorithm_id,
                 heatmap_plot_type,
                 comparison=False,
                 filter_type=None,
                 filter_exec_params=None,
                 list_of_axes_specifications=global_heatmap_axes_specifications_latency_study,
                 list_of_metric_specifications=None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        # NOTE(review): the execution id passed to the parent is fixed to 0 —
        # presumably the latency study uses a single execution per algorithm; confirm.
        super(LatencyStudyPlotter, self).__init__(output_path,
                                                  output_filetype,
                                                  with_latencies_solution_storage,
                                                  algorithm_id,
                                                  0,
                                                  heatmap_plot_type,
                                                  filter_type,
                                                  filter_exec_params,
                                                  list_of_axes_specifications,
                                                  list_of_metric_specifications,
                                                  show_plot,
                                                  save_plot,
                                                  overwrite_existing_files,
                                                  forbidden_scenario_ids,
                                                  paper_mode)
        self.baseline_solution_storage = baseline_solution_storage
        self.is_comparison = comparison
        if baseline_solution_storage is not None and not comparison:
            # inject a pseudo parameter value so the baseline shows up as an
            # additional entry on the latency_approximation_type axis
            self.scenarioparameter_room['latency_approx'][0]['latency_approximation_type'].append('no latencies')

    def _lookup_solutions_by_execution(self, scenario_ids, x_key, x_val, y_key, y_val, solution_container=None):
        """Resolve the solutions to aggregate for one heatmap cell (x_key=x_val, y_key=y_val).

        Three cases are handled when a baseline storage is present:
        1. x-axis is latency_approximation_type: restrict the scenarios by the
           y-axis value (unless it is itself a latency parameter), then either
           return pure baseline solutions (x_val == 'no latencies') or, in
           comparison mode, (baseline, with-latency) pairs.
        2. y-axis is latency_approximation_type: symmetric to case 1, but note
           the returned pairs are (with-latency, baseline) — the opposite order.
        3. Neither axis is the type but comparison mode is on: pair baseline and
           with-latency solutions for all execution ids matching both axes.
        Otherwise falls through to the parent implementation.
        """
        print x_key, " : ", x_val, " & ", y_key , " : ", y_val
        if self.baseline_solution_storage is not None:
            if x_key == "latency_approximation_type":
                path_y_axis, _ = extract_parameter_range(self.scenarioparameter_room, y_key)
                # latency-related parameters do not partition the scenario set,
                # so no scenario restriction is applied for them
                if y_key[:7] != "latency":
                    y_axis_scenarios = lookup_scenarios_having_specific_values(self.scenario_parameter_dict, path_y_axis, y_val)
                else:
                    y_axis_scenarios = self.all_scenario_ids
                scenario_ids = scenario_ids & y_axis_scenarios
                solution_dicts_baseline = [self.baseline_solution_storage.get_solutions_by_scenario_index(x) for x in scenario_ids]
                if x_val == "no latencies":
                    # pseudo value: this cell shows the baseline alone
                    return [x[self.algorithm_id][self.execution_id] for x in solution_dicts_baseline]
                elif self.is_comparison:
                    solution_dicts = [self.scenario_solution_storage.get_solutions_by_scenario_index(x) \
                                      for x in scenario_ids]
                    # intersect the execution ids admissible for both axis values
                    y_axis_exec_ids = self.exec_id_lookup.get(y_key, {}).get(y_val, self.execution_id_filter)
                    x_axis_exec_ids = self.exec_id_lookup.get(x_key, {}).get(x_val, self.execution_id_filter)
                    exec_ids_to_consider = y_axis_exec_ids & x_axis_exec_ids & self.execution_id_filter
                    print " Using Exec_IDS: ", exec_ids_to_consider
                    print " Using Scenarios: ", scenario_ids
                    # pairs are (baseline, with-latency) here
                    return [(x[self.algorithm_id][self.execution_id], y[self.algorithm_id][exec_id]) \
                            for (x, y) in zip(solution_dicts_baseline, solution_dicts) \
                            for exec_id in exec_ids_to_consider]
            elif y_key == "latency_approximation_type":
                path_x_axis, _ = extract_parameter_range(self.scenarioparameter_room, x_key)
                if x_key[:7] != "latency":
                    x_axis_scenarios = lookup_scenarios_having_specific_values(self.scenario_parameter_dict, path_x_axis, x_val)
                else:
                    x_axis_scenarios = self.all_scenario_ids
                scenario_ids = scenario_ids & x_axis_scenarios
                solution_dicts_baseline = [self.baseline_solution_storage.get_solutions_by_scenario_index(x) for x in scenario_ids]
                if y_val == "no latencies":
                    return [x[self.algorithm_id][self.execution_id] for x in solution_dicts_baseline]
                elif self.is_comparison:
                    solution_dicts = [self.scenario_solution_storage.get_solutions_by_scenario_index(x) \
                                      for x in scenario_ids]
                    y_axis_exec_ids = self.exec_id_lookup.get(y_key, {}).get(y_val, self.execution_id_filter)
                    x_axis_exec_ids = self.exec_id_lookup.get(x_key, {}).get(x_val, self.execution_id_filter)
                    exec_ids_to_consider = y_axis_exec_ids & x_axis_exec_ids & self.execution_id_filter
                    print " Using Exec_IDS: ", exec_ids_to_consider
                    print " Using Scenarios: ", scenario_ids
                    # NOTE(review): pair order is swapped relative to the x-axis
                    # branch above: (with-latency, baseline) — presumably intended
                    # so the "latency" element always corresponds to the type axis; confirm.
                    return [(y[self.algorithm_id][exec_id], x[self.algorithm_id][self.execution_id]) \
                            for (x, y) in zip(solution_dicts_baseline, solution_dicts) \
                            for exec_id in exec_ids_to_consider]
                # solution_dicts = [self.scenario_solution_storage.get_solutions_by_scenario_index(x) for x in
                #                   scenario_ids]
                # result = [x[self.algorithm_id][self.execution_id] for x in solution_dicts]
                # return zip(result_baseline, result)
            elif self.is_comparison:  # no axis is type
                solution_dicts = [self.scenario_solution_storage.get_solutions_by_scenario_index(x) for x in scenario_ids]
                solution_dicts_baseline = [self.baseline_solution_storage.get_solutions_by_scenario_index(x) for x in scenario_ids]
                y_axis_exec_ids = self.exec_id_lookup.get(y_key, {}).get(y_val, self.execution_id_filter)
                x_axis_exec_ids = self.exec_id_lookup.get(x_key, {}).get(x_val, self.execution_id_filter)
                exec_ids_to_consider = y_axis_exec_ids & x_axis_exec_ids & self.execution_id_filter
                print " Using Exec_IDS: ", exec_ids_to_consider
                print " Using Scenarios: ", scenario_ids
                return [(x[self.algorithm_id][self.execution_id], y[self.algorithm_id][exec_id]) \
                        for (x, y) in zip(solution_dicts_baseline, solution_dicts) \
                        for exec_id in exec_ids_to_consider]
        # no baseline (or nothing matched): defer to the generic implementation
        return super(LatencyStudyPlotter, self)._lookup_solutions_by_execution(scenario_ids,
                                                                               x_key, x_val, y_key, y_val, self.scenario_solution_storage)
class ComparisonPlotter_ECDF_BoxPlot(AbstractPlotter):
def __init__(self,
output_path,
output_filetype,
vine_solution_storage,
vine_algorithm_id,
vine_execution_id,
randround_solution_storage,
randround_algorithm_id,
randround_execution_id,
both_randround=False,
show_plot=False,
save_plot=True,
overwrite_existing_files=False,
forbidden_scenario_ids=None,
paper_mode=True,
vine_settings_to_consider=None,
rr_settings_to_consider=None,
request_sets=None
):
super(ComparisonPlotter_ECDF_BoxPlot, self).__init__(output_path, output_filetype, vine_solution_storage,
vine_algorithm_id, vine_execution_id, show_plot, save_plot,
overwrite_existing_files, forbidden_scenario_ids, paper_mode)
self.randround_solution_storage = randround_solution_storage
self.randround_algorithm_id = randround_algorithm_id
self.randround_execution_id = randround_execution_id
self.both_randround = both_randround
filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(self.scenarioparameter_room,
"number_of_requests")
self._number_of_requests_list = list_number_of_requests
self._filter_path_number_of_requests = filter_path_number_of_requests
filter_path_edge_rf, list_edge_rfs = extract_parameter_range(self.scenarioparameter_room,
"edge_resource_factor")
self._edge_rfs_list = list_edge_rfs
self._filter_path_edge_rf = filter_path_edge_rf
self.vine_settings_to_consider = vine_settings_to_consider
self.rr_settings_to_consider = rr_settings_to_consider
if self.vine_settings_to_consider is None:
self.vine_settings_to_consider = get_list_of_vine_settings()
if self.rr_settings_to_consider is None:
self.rr_settings_to_consider = get_list_of_rr_settings()
if request_sets is None:
self.request_sets = [[40,60], [80,100]]
else:
self.request_sets = request_sets
def _lookup_vine_solution(self, scenario_id):
if self.both_randround:
return self.scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.randround_algorithm_id][
self.randround_execution_id]
else:
return self.scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.algorithm_id][self.execution_id]
def _lookup_randround_solution(self, scenario_id):
return self.randround_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.randround_algorithm_id][self.randround_execution_id]
def _compute_profit_best_rr_div_best_vine(self, vine_result, rr_result):
best_rr = max([rr_result.profits[rr_settings].max for rr_settings in self.rr_settings_to_consider])
if self.both_randround:
best_vine = max([vine_result.profits[vine_settings].max for vine_settings in self.vine_settings_to_consider])
else:
best_vine = max([vine_result[vine_settings][0].profit.max for vine_settings in self.vine_settings_to_consider])
return best_rr / best_vine
def compute_relative_profits_arrays(self, list_of_scenarios):
result = {edge_rf :
{number_of_requests: None
for number_of_requests in self._number_of_requests_list}
for edge_rf in self._edge_rfs_list
}
for edge_rf in self._edge_rfs_list:
for number_of_requests in self._number_of_requests_list:
scenario_ids_with_right_edge_rf = self._obtain_scenarios_based_on_filters([{"parameter": "edge_resource_factor", "value": edge_rf}])
scenario_ids_with_right_number_requests = self._obtain_scenarios_based_on_filters([{"parameter": "number_of_requests", "value": number_of_requests}])
scenario_ids_to_consider = set(list_of_scenarios)
scenario_ids_to_consider &= scenario_ids_with_right_edge_rf
scenario_ids_to_consider &= scenario_ids_with_right_number_requests
result[edge_rf][number_of_requests] = np.full(len(scenario_ids_to_consider), np.NaN)
for i, scenario_id in enumerate(scenario_ids_to_consider):
vine_result = self._lookup_vine_solution(scenario_id)
rr_result = self._lookup_randround_solution(scenario_id)
result[edge_rf][number_of_requests][i] = self._compute_profit_best_rr_div_best_vine(vine_result, rr_result)
return result
def plot_figure(self, filter_specifications):
self.plot_profit_ecdf(filter_specifications)
self.plot_relative_performance_Vine_and_RandRound(filter_specifications)
    def plot_profit_ecdf(self, filter_specifications):
        """Plot ECDFs of the relative profit best_RR / best_ViNE (in percent).

        One subplot per request-count group in self.request_sets; within each
        subplot, one curve per edge resource factor. Generation is skipped if
        the output file already exists (unless overwriting is enabled) or if
        the filter specifications already fix the number of requests, which
        would conflict with the per-request-count grouping.
        """
        output_filename = "ECDF_profit"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        if filter_specifications:
            for filter_specification in filter_specifications:
                if filter_specification["parameter"] == "number_of_requests":
                    logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(
                        output_filename, filter_specification))
                    return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_relative_profits_arrays(scenario_ids)
        print result
        fig, axs = plt.subplots(nrows=2, figsize=FIGSIZE, sharex="col", sharey="row")
        # ax.set_xscale("log", basex=10)
        #colors_erf = ['k', 'g', 'b', 'r', 'y']
        # one inferno-colormap color per edge resource factor
        colors_erf = [plt.cm.inferno(val) for val in [0.8,0.6,0.4,0.2,0.0]]
        max_observed_value = 0
        linestyles = [":", "-.", "--", "-"]
        number_requests_legend_handlers = []
        erf_legend_handlers = []
        for j, number_of_requests_list in enumerate(self.request_sets):
            for i, erf in enumerate(self._edge_rfs_list):
                result_slice = np.zeros(0)
                print " - - - - -\n", result, "\n", number_of_requests_list, "\n- - - - - ----------"
                # pool the request counts of this group into a single sample
                for number_of_requests in number_of_requests_list:
                    result_slice = np.concatenate((result_slice, result[erf][number_of_requests]))
                # fraction of scenarios in which RR beats ViNE by more than ~30%
                # (1.29999 avoids counting values that are exactly at the 1.3 boundary due to float noise)
                ratio_rr_better = (len(np.where(result_slice > 1.29999)[0]))/(float(len(result_slice)))
                print "{:0.2f} {:^12s} {:0.10f}".format(erf, number_of_requests_list, ratio_rr_better)
                sorted_data = np.sort(result_slice[~np.isnan(result_slice)])
                max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
                yvals = np.arange(1, len(sorted_data) + 1) / float(len(sorted_data))
                # express both axes in percent
                yvals *= 100
                sorted_data *= 100
                axs[j].plot(sorted_data, yvals, color=colors_erf[i], alpha=0.8, linestyle="-",
                            label="{} {}".format(erf, number_of_requests_list), linewidth=2.8)
                # if j == 0:
                #     number_requests_legend_handlers.append(
                #         matplotlib.lines.Line2D([], [], color='gray', linestyle=linestyles[j+2],
                #                                 label='{}'.format(number_of_requests_list)))
                if j == 0:
                    # build the ERF legend only once (first subplot)
                    erf_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors_erf[i], linestyle="-", linewidth=2.4,
                                                                       label='{}'.format(erf)))
            ax = axs[j]
            #ax.set_title("#Requests: {} & {}".format(number_of_requests_list[0],number_of_requests_list[1]), fontsize=15)
            props = dict(boxstyle='round', facecolor='white', alpha=0.5)
            print number_of_requests_list
            # annotate the subplot with its request-count group instead of a title
            ax.text(25, 95, "#req.:\n{} & {}".format(number_of_requests_list[0],number_of_requests_list[1]), fontsize=13, bbox=props, verticalalignment="top")
            #ax.set_ylabel("ECDF [%]", fontsize=14)
            ax.grid(True, which="both", linestyle=":")
            ax.set_xlim(20,200)
            major_x = [40, 70, 100, 130, 160, 190]
            minor_x = [25, 55, 85, 115, 145, 175]
            ax.set_xticks(major_x, minor=False)
            ax.set_xticks(minor_x, minor=True)
            for x in major_x:
                if x == 100:
                    # highlight parity (RR profit == ViNE profit) in red
                    ax.axvline(x, linestyle=':', color='red', alpha=0.6, linewidth=0.8)
                else:
                    ax.axvline(x, linestyle=':', color='gray', alpha=0.4, linewidth=0.8)
            major_y = [0, 25, 50, 75, 100]
            ax.set_yticks(major_y, minor=False)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(15)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14.5)
            if j == 1:
                ax.set_xlabel("profit($\mathsf{RR}_{\mathsf{best}}$) / profit($\mathsf{WiNE}_{\mathsf{best}}$) [%]", fontsize=15)
        # shared y-axis label for both subplots
        fig.text(0.01, 0.54, 'ECDF [%]', va='center', rotation='vertical', fontsize=15)
        fig.subplots_adjust(top=0.9)
        fig.subplots_adjust(bottom=0.18)
        fig.subplots_adjust(right=0.78)
        fig.subplots_adjust(hspace=0.1)
        fig.subplots_adjust(left=0.16)
        first_legend = plt.legend(handles=erf_legend_handlers, title="ERF", loc=4, fontsize=14,
                                  handletextpad=0.35, bbox_to_anchor=(1,0.25), bbox_transform = plt.gcf().transFigure,
                                  borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize='15')
        plt.gca().add_artist(first_legend)
        plt.setp(axs[0].get_xticklabels(), visible=True)
        # o_leg = plt.legend(handles=number_requests_legend_handlers, loc=2, title="#Requests", fontsize=14,
        #                    handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        # plt.setp(o_leg.get_title(), fontsize='15')
        plt.suptitle("Profit Comparison: $\mathsf{RR}_{\mathsf{best}}$ / $\mathsf{WiNE}_{\mathsf{best}}$", fontsize=17)
        #ax.set_xlabel("rel profit$)", fontsize=16)
        # for tick in ax.xaxis.get_major_ticks():
        #     tick.label.set_fontsize(15.5)
        # for tick in ax.yaxis.get_major_ticks():
        #     tick.label.set_fontsize(15.5)
        # ax.set_xticks([ 1, 1.5, 2, 2.5, 3, 3.5], minor=False)
        # ax.set_xticks([0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5], minor=True)
        # ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
        # ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        # ax.set_xticklabels([], minor=True)
        # gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        # for line in gridlines:
        #     line.set_linestyle(':')
        self._show_and_or_save_plots(output_path, filename, perform_tight_layout=False)
    def plot_profit_ecdf_pre_box(self, filter_specifications):
        """Earlier variant of plot_profit_ecdf (kept for reference).

        Same ECDF of best_RR / best_ViNE, but with hard-coded request groups
        [[40, 60], [80, 100]], raw (non-percent) axes, per-subplot titles and
        y-labels. NOTE(review): it writes the same output filename
        "ECDF_profit" as plot_profit_ecdf — the two would overwrite each other
        if both were called; presumably only one of them is wired up.
        """
        output_filename = "ECDF_profit"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        if filter_specifications:
            for filter_specification in filter_specifications:
                if filter_specification["parameter"] == "number_of_requests":
                    logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(
                        output_filename, filter_specification))
                    return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_relative_profits_arrays(scenario_ids)
        print result
        fig, axs = plt.subplots(nrows=2, figsize=FIGSIZE, sharex="col")
        # ax.set_xscale("log", basex=10)
        #colors_erf = ['k', 'g', 'b', 'r', 'y']
        # one inferno-colormap color per edge resource factor
        colors_erf = [plt.cm.inferno(val) for val in [0.8,0.6,0.4,0.2,0.0]]
        max_observed_value = 0
        linestyles = [":", "-.", "--", "-"]
        number_requests_legend_handlers = []
        erf_legend_handlers = []
        for j, number_of_requests_list in enumerate([[40, 60], [80, 100]]):
            for i, erf in enumerate(self._edge_rfs_list):
                result_slice = np.zeros(0)
                # pool the request counts of this group into a single sample
                for number_of_requests in number_of_requests_list:
                    result_slice = np.concatenate((result_slice, result[erf][number_of_requests]))
                sorted_data = np.sort(result_slice[~np.isnan(result_slice)])
                max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
                yvals = np.arange(1, len(sorted_data) + 1) / float(len(sorted_data))
                axs[j].plot(sorted_data, yvals, color=colors_erf[i], alpha=0.8, linestyle="-",
                            label="{} {}".format(erf, number_of_requests_list), linewidth=2.8)
                # if j == 0:
                #     number_requests_legend_handlers.append(
                #         matplotlib.lines.Line2D([], [], color='gray', linestyle=linestyles[j+2],
                #                                 label='{}'.format(number_of_requests_list)))
                if j == 0:
                    # build the ERF legend only once (first subplot)
                    erf_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors_erf[i], linestyle="-", linewidth=2.4,
                                                                       label='{}'.format(erf)))
            ax = axs[j]
            ax.set_title("#Requests: {} & {}".format(number_of_requests_list[0],number_of_requests_list[1]), fontsize=15)
            ax.set_ylabel("ECDF", fontsize=14)
            ax.grid(True, which="both", linestyle=":")
            ax.set_xlim(0.2,2)
            major_x = [0.4, 0.7, 1.0, 1.3, 1.6,1.9]
            minor_x = [0.25, 0.55, 0.85, 1.15, 1.45, 1.75]
            ax.set_xticks(major_x, minor=False)
            ax.set_xticks(minor_x, minor=True)
            for x in major_x:
                ax.axvline(x, linestyle=':', color='gray', alpha=0.4, linewidth=0.8)
            major_y = [0, 0.25, 0.5, 0.75, 1.0]
            ax.set_yticks(major_y, minor=False)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(15)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            if j == 1:
                ax.set_xlabel("profit($\mathsf{RR}_{\mathsf{best}}$) / profit($\mathsf{WiNE}_{\mathsf{best}}$)", fontsize=15)
        fig.subplots_adjust(top=0.825)
        fig.subplots_adjust(bottom=0.15)
        fig.subplots_adjust(right=0.78)
        fig.subplots_adjust(hspace=0.3)
        fig.subplots_adjust(left=0.18)
        first_legend = plt.legend(handles=erf_legend_handlers, title="ERF", loc=4, fontsize=14,
                                  handletextpad=0.35, bbox_to_anchor=(1,0.25), bbox_transform = plt.gcf().transFigure,
                                  borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize='15')
        plt.gca().add_artist(first_legend)
        plt.setp(axs[0].get_xticklabels(), visible=True)
        # o_leg = plt.legend(handles=number_requests_legend_handlers, loc=2, title="#Requests", fontsize=14,
        #                    handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        # plt.setp(o_leg.get_title(), fontsize='15')
        plt.suptitle("Relative Profit", fontsize=17)
        #ax.set_xlabel("rel profit$)", fontsize=16)
        # for tick in ax.xaxis.get_major_ticks():
        #     tick.label.set_fontsize(15.5)
        # for tick in ax.yaxis.get_major_ticks():
        #     tick.label.set_fontsize(15.5)
        # ax.set_xticks([ 1, 1.5, 2, 2.5, 3, 3.5], minor=False)
        # ax.set_xticks([0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5], minor=True)
        # ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
        # ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        # ax.set_xticklabels([], minor=True)
        # gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        # for line in gridlines:
        #     line.set_linestyle(':')
        self._show_and_or_save_plots(output_path, filename, perform_tight_layout=False)
    def plot_relative_performance_Vine_and_RandRound(self, filter_specifications):
        """Boxplot figure comparing algorithm variants' profits against the LP bound.

        Left panel: ViNE variants (or RR variants when both_randround is set),
        grouped by rounding procedure (Det./Rand.) with minor labels for the LP
        objective (L/C). Right panel: randomized-rounding heuristics, grouped by
        LP recomputation mode (No Recomp./Recomp.) with minor labels for the
        rounding order (R/S/A). Each variant shows a 'best' and (where present)
        a 'mean' box. All values are normalized to the scenario's LP profit
        bound (in percent).
        """
        output_filename = "boxplot_relative_performance"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        if filter_specifications:
            for filter_specification in filter_specifications:
                if filter_specification["parameter"] == "number_of_requests":
                    logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(
                        output_filename, filter_specification))
                    return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        vine_settings_list = get_list_of_vine_settings()
        rr_settings_list = get_list_of_rr_settings()
        # plot_data_raw[settings][scenario_id] -> (best_percent, mean_percent)
        plot_data_raw = {vine_settings: {scenario_id: None for scenario_id in scenario_ids} for vine_settings in
                         vine_settings_list}
        plot_data_raw.update(
            {rr_settings: {scenario_id: None for scenario_id in scenario_ids} for rr_settings in rr_settings_list})
        for scenario_id in scenario_ids:
            if self.both_randround:
                best_vine = max([self._lookup_vine_solution(scenario_id).profits[rr_settings].max for rr_settings in
                                 rr_settings_list])
            else:
                best_vine = max([self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.max for vine_settings in
                                 vine_settings_list])
            best_rr = max([self._lookup_randround_solution(scenario_id).profits[rr_settings].max for rr_settings in
                           rr_settings_list])
            best_bound = self._lookup_randround_solution(scenario_id).lp_profit
            # NOTE(review): the two max-computations above are immediately
            # overwritten here — normalization is against the LP profit bound,
            # and the earlier maxima are effectively dead code.
            best_vine = best_bound
            best_rr = best_bound
            if self.both_randround:
                for rr_settings in rr_settings_list:
                    plot_data_raw[rr_settings][scenario_id] = (
                        100.0 * self._lookup_vine_solution(scenario_id).profits[rr_settings].max / best_rr,
                        100.0 * self._lookup_vine_solution(scenario_id).profits[rr_settings].mean / best_rr
                    )
            else:
                for vine_settings in vine_settings_list:
                    plot_data_raw[vine_settings][scenario_id] = (
                        100.0 * self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.max / best_vine,
                        100.0 * self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.mean / best_vine
                    )
                for rr_settings in rr_settings_list:
                    plot_data_raw[rr_settings][scenario_id] = (
                        100.0 * self._lookup_randround_solution(scenario_id).profits[rr_settings].max / best_rr,
                        100.0 * self._lookup_randround_solution(scenario_id).profits[rr_settings].mean / best_rr
                    )
        # y range in percent, with headroom below/above for the group labels
        y_min = -5
        y_max = 105
        fig, axs = plt.subplots(ncols=2, nrows=1, figsize=FIGSIZE, gridspec_kw={'width_ratios': [13, 20]}, sharey="row")
        ax = axs[0]
        # split vine settings by rounding procedure; splittable edge embedding is excluded
        vine_det = []
        vine_rand = []
        for vine_settings in vine_settings_list:
            if vine_settings.edge_embedding_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE:
                continue
            if vine_settings.rounding_procedure == vine.ViNERoundingProcedure.DETERMINISTIC:
                vine_det.append(vine_settings)
            else:
                vine_rand.append(vine_settings)
        ordered_vine_settings = [vine_det, vine_rand]
        positions = []
        values = []
        minor_labels = []
        minor_label_locations = []
        major_labels = []
        major_label_locations = []
        current_pos = 0.5
        cmap = plt.get_cmap("inferno")
        color_best = cmap(0.6)
        color_mean = cmap(0)
        color_def = cmap(0.6)
        colors = []
        # RR settings grouped by LP recomputation mode, each ordered R / S / A
        rr_no_recomp = [(treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.RANDOM),
                        (treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.STATIC_REQ_PROFIT),
                        (treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT)]
        rr_recomp = [(treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
                      treewidth_model.RoundingOrder.RANDOM),
                     (treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
                      treewidth_model.RoundingOrder.STATIC_REQ_PROFIT),
                     (treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
                      treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT)]
        ordered_rr_settings = [rr_no_recomp, rr_recomp]
        # vine!
        if not self.both_randround:
            for i in range(2):
                # i == 0: det
                # i == 1: rand
                for vine_settings in ordered_vine_settings[i]:
                    if i == 0:
                        # deterministic rounding: a single box (best only)
                        current_values = [plot_data_raw[vine_settings][scenario_id][0] for scenario_id in scenario_ids]
                        values.append(current_values)
                        positions.append(current_pos)
                        if vine_settings.lp_objective == vine.ViNELPObjective.ViNE_LB_DEF:
                            minor_labels.append("L")
                        else:
                            minor_labels.append("C")
                        minor_label_locations.append(current_pos)
                        current_pos += 1.75
                        colors.append(color_def)
                    else:
                        # randomized rounding: a best box (j == 0) and a mean box (j == 1)
                        for j in range(2):
                            current_values = [plot_data_raw[vine_settings][scenario_id][j] for scenario_id in scenario_ids]
                            values.append(current_values)
                            positions.append(current_pos)
                            current_pos += 0.75
                            if j == 0:
                                colors.append(color_best)
                            else:
                                colors.append(color_mean)
                        if vine_settings.lp_objective == vine.ViNELPObjective.ViNE_LB_DEF:
                            minor_labels.append("L")
                        else:
                            minor_labels.append("C")
                        # center the minor label between the best/mean pair
                        minor_label_locations.append((positions[-1] + positions[-2]) / 2.0)
                        current_pos += 0.5
                if i == 0:
                    major_label_locations.append(np.mean(positions))
                    major_labels.append("Det.")
                    current_pos += 0.75
                else:
                    major_label_locations.append((positions[2] + positions[-1]) / 2.0)
                    major_labels.append("Rand.")
        else:
            for i in range(2):
                # i == 0: no_recomp
                # i == 1: recomp!
                for rr_settings in ordered_rr_settings[i]:
                    for j in range(2):
                        current_values = [plot_data_raw[rr_settings][scenario_id][j] for scenario_id in scenario_ids]
                        values.append(current_values)
                        positions.append(current_pos)
                        current_pos += 0.75
                        if j == 0:
                            colors.append(color_best)
                        else:
                            colors.append(color_mean)
                    if rr_settings[1] == treewidth_model.RoundingOrder.RANDOM:
                        minor_labels.append("R")
                    elif rr_settings[1] == treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT:
                        minor_labels.append("A")
                    elif rr_settings[1] == treewidth_model.RoundingOrder.STATIC_REQ_PROFIT:
                        minor_labels.append("S")
                    else:
                        raise ValueError()
                    minor_label_locations.append((positions[-1] + positions[-2]) / 2.0)
                    current_pos += 0.5
                if i == 0:
                    major_label_locations.append(np.mean(positions))
                    major_labels.append("No Recomp.")
                    current_pos += 1
                else:
                    major_label_locations.append((positions[6] + positions[-1]) / 2.0)
                    major_labels.append("Recomp.")
        # bplots = []
        #
        # for _bin, pos in zip(values, positions):
        #     print "plot...", pos
        #     bplots.append(ax.boxplot(x=_bin,
        #                              positions=[pos],
        #                              widths=[0.5],
        #                              patch_artist=True))
        bplots = ax.boxplot(x=values,
                            positions=positions,
                            widths=[0.5] * len(positions),
                            patch_artist=True,
                            notch=True,
                            bootstrap=10000)
        # NOTE(review): bplots is the dict returned by boxplot (keys: boxes,
        # medians, fliers, whiskers, caps, ...), so len(bplots) is the number of
        # dict keys — not the number of boxes. The second panel below uses
        # range(len(positions)) instead; confirm the two counts coincide here.
        for i in range(len(bplots)):
            color = colors[i]
            bplots['boxes'][i].set_edgecolor(color)
            bplots['boxes'][i].set_facecolor(
                matplotlib.colors.to_rgba(color, alpha=0.3)
            )
            for keyword in ["medians", "fliers", "whiskers", "caps"]:
                # whiskers and caps come in pairs (lower/upper) per box
                if keyword == "whiskers" or keyword == "caps":
                    bplots[keyword][i * 2].set_color(color)
                    bplots[keyword][i * 2 + 1].set_color(color)
                else:
                    bplots[keyword][i].set_color(color)
                if keyword == "fliers":
                    bplots[keyword][i].set(
                        marker='o',
                        markeredgecolor=matplotlib.colors.to_rgba(color, alpha=0.15),
                    )
        ax.set_ylim(y_min, y_max)
        # minor (per-variant) and major (per-group) labels drawn below the axis
        for k in range(len(minor_label_locations)):
            ax.text(x=minor_label_locations[k], y=y_min - 11, s=minor_labels[k], horizontalalignment='center',
                    fontdict={'fontsize': 14})
        for k in range(len(major_label_locations)):
            ax.text(x=major_label_locations[k], y=y_min - 21, s=major_labels[k], horizontalalignment='center',
                    fontdict={'fontsize': 14})
        ax.set_xticks([])
        ax.set_yticks([x * 10 for x in range(1, 10, 2)], minor=True)
        ax.grid(True, which="major", linestyle="-")
        ax.grid(True, which="minor", linestyle=":")
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(15)
        ax.set_title("WiNE(ViNE)", fontsize=16)
        ax.set_ylabel("Profit / $\mathsf{LP}_{\mathsf{UB}}$ [%]", fontsize=16)
        # RAND ROUND!
        ax = axs[1]
        positions = []
        values = []
        minor_labels = []
        minor_label_locations = []
        major_labels = []
        major_label_locations = []
        current_pos = 0.5
        colors = []
        fig.subplots_adjust(bottom=0.18, top=0.84, right=0.83, wspace=0.12, left=0.14)
        # rand round
        for i in range(2):
            # i == 0: no_recomp
            # i == 1: recomp!
            for rr_settings in ordered_rr_settings[i]:
                for j in range(2):
                    current_values = [plot_data_raw[rr_settings][scenario_id][j] for scenario_id in scenario_ids]
                    values.append(current_values)
                    positions.append(current_pos)
                    current_pos += 0.75
                    if j == 0:
                        colors.append(color_best)
                    else:
                        colors.append(color_mean)
                if rr_settings[1] == treewidth_model.RoundingOrder.RANDOM:
                    minor_labels.append("R")
                elif rr_settings[1] == treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT:
                    minor_labels.append("A")
                elif rr_settings[1] == treewidth_model.RoundingOrder.STATIC_REQ_PROFIT:
                    minor_labels.append("S")
                else:
                    raise ValueError()
                minor_label_locations.append((positions[-1] + positions[-2]) / 2.0)
                current_pos += 0.5
            if i == 0:
                major_label_locations.append(np.mean(positions))
                major_labels.append("No Recomp.")
                current_pos += 1
            else:
                major_label_locations.append((positions[6] + positions[-1]) / 2.0)
                major_labels.append("Recomp.")
        bplots = ax.boxplot(x=values,
                            positions=positions,
                            widths=[0.5] * len(positions),
                            patch_artist=True,
                            notch=True,
                            bootstrap=1000)
        print bplots
        print colors
        for i in range(len(positions)):
            print "Setting color of boxplot ", i
            color = colors[i]
            bplots['boxes'][i].set_edgecolor(color)
            bplots['boxes'][i].set_facecolor(
                matplotlib.colors.to_rgba(color, alpha=0.3)
            )
            for keyword in ["medians", "fliers", "whiskers", "caps"]:
                if keyword == "whiskers" or keyword == "caps":
                    bplots[keyword][i * 2].set_color(color)
                    bplots[keyword][i * 2 + 1].set_color(color)
                else:
                    bplots[keyword][i].set_color(color)
                if keyword == "fliers":
                    bplots[keyword][i].set(
                        marker='o',
                        markeredgecolor=matplotlib.colors.to_rgba(color, alpha=0.15),
                    )
        ax.set_ylim(y_min, y_max)
        for k in range(len(minor_label_locations)):
            ax.text(x=minor_label_locations[k], y=y_min - 11, s=minor_labels[k], horizontalalignment='center',
                    fontdict={'fontsize': 14})
        for k in range(len(major_label_locations)):
            ax.text(x=major_label_locations[k], y=y_min - 21, s=major_labels[k], horizontalalignment='center',
                    fontdict={'fontsize': 14})
        ax.set_xticks([])
        ax.set_title("RR Heuristics", fontsize=16)
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(15)
        ax.set_yticks([x * 10 for x in range(1, 10, 2)], minor=True)
        ax.grid(True, which="major", linestyle="-")
        ax.grid(True, which="minor", linestyle=":")
        # LEGEND!
        best_patch = mpatches.Patch(color=matplotlib.colors.to_rgba(color_best, alpha=0.6), label='best')
        mean_patch = mpatches.Patch(color=matplotlib.colors.to_rgba(color_mean, alpha=0.6), label='mean')
        plt.legend(handles=[best_patch, mean_patch], loc=4, fontsize=14, handlelength=0.5,
                   handletextpad=0.35, bbox_to_anchor=(1, 0.5), bbox_transform=plt.gcf().transFigure,
                   borderaxespad=0.175, borderpad=0.2)
        plt.suptitle("Performance of Algorithm Variants", fontsize=17)
        self._show_and_or_save_plots(output_path, filename, perform_tight_layout=False)
# def plot_relative_performance_Vine_and_RandRound(self, filter_specifications):
#
# output_filename = "boxplot_relative_performance"
#
# output_path, filename = self._construct_output_path_and_filename(output_filename,
# filter_specifications)
#
# logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
#
# if not self.overwrite_existing_files and os.path.exists(filename):
# logger.info("Skipping generation of {} as this file already exists".format(filename))
# return
#
# if filter_specifications:
# for filter_specification in filter_specifications:
# if filter_specification["parameter"] == "number_of_requests":
# logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(
# output_filename, filter_specification))
# return
#
# scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
#
# if self.forbidden_scenario_ids:
# scenario_ids = scenario_ids - self.forbidden_scenario_ids
#
# vine_settings_list = get_list_of_vine_settings()
# rr_settings_list = get_list_of_rr_settings()
#
# plot_data_raw = {vine_settings: {scenario_id: None for scenario_id in scenario_ids} for vine_settings in
# vine_settings_list}
# plot_data_raw.update(
# {rr_settings: {scenario_id: None for scenario_id in scenario_ids} for rr_settings in rr_settings_list})
#
# for scenario_id in scenario_ids:
# best_vine = max([self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.max for vine_settings in
# vine_settings_list])
# best_rr = max([self._lookup_randround_solution(scenario_id).profits[rr_settings].max for rr_settings in
# rr_settings_list])
# best_bound = self._lookup_randround_solution(scenario_id).lp_profit
# best_vine = best_bound
# best_rr = best_bound
#
# for vine_settings in vine_settings_list:
# plot_data_raw[vine_settings][scenario_id] = (
# 100.0 * self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.max / best_vine,
# 100.0 * self._lookup_vine_solution(scenario_id)[vine_settings][0].profit.mean / best_vine
# )
# for rr_settings in rr_settings_list:
# plot_data_raw[rr_settings][scenario_id] = (
# 100.0 * self._lookup_randround_solution(scenario_id).profits[rr_settings].max / best_rr,
# 100.0 * self._lookup_randround_solution(scenario_id).profits[rr_settings].mean / best_rr
# )
#
# y_min = -5
# y_max = 105
#
# fig, axs = plt.subplots(ncols=2, nrows=1, figsize=FIGSIZE, gridspec_kw={'width_ratios': [13, 20]}, sharey="row")
# ax = axs[0]
#
# vine_det = []
# vine_rand = []
#
# for vine_settings in vine_settings_list:
# if vine_settings.edge_embedding_model == vine.ViNEEdgeEmbeddingModel.SPLITTABLE:
# continue
# if vine_settings.rounding_procedure == vine.ViNERoundingProcedure.DETERMINISTIC:
# vine_det.append(vine_settings)
# else:
# vine_rand.append(vine_settings)
#
# ordered_vine_settings = [vine_det, vine_rand]
#
# positions = []
# values = []
#
# minor_labels = []
# minor_label_locations = []
#
# major_labels = []
# major_label_locations = []
# current_pos = 0.5
#
# cmap = plt.get_cmap("inferno")
#
# color_best = cmap(0.6)
# color_mean = cmap(0)
# color_def = cmap(0.6)
#
# colors = []
#
# # vine!
# for i in range(2):
# # i == 0: det
# # i == 1: rand
# for vine_settings in ordered_vine_settings[i]:
# if i == 0:
# current_values = [plot_data_raw[vine_settings][scenario_id][0] for scenario_id in scenario_ids]
# values.append(current_values)
# positions.append(current_pos)
# if vine_settings.lp_objective == vine.ViNELPObjective.ViNE_LB_DEF:
# minor_labels.append("L")
# else:
# minor_labels.append("C")
# minor_label_locations.append(current_pos)
# current_pos += 1.75
# colors.append(color_def)
# else:
# for j in range(2):
# current_values = [plot_data_raw[vine_settings][scenario_id][j] for scenario_id in scenario_ids]
# values.append(current_values)
# positions.append(current_pos)
# current_pos += 0.75
# if j == 0:
# colors.append(color_best)
# else:
# colors.append(color_mean)
#
# if vine_settings.lp_objective == vine.ViNELPObjective.ViNE_LB_DEF:
# minor_labels.append("L")
# else:
# minor_labels.append("C")
# minor_label_locations.append((positions[-1] + positions[-2]) / 2.0)
# current_pos += 0.5
# if i == 0:
# major_label_locations.append(np.mean(positions))
# major_labels.append("Det.")
# current_pos += 0.75
# else:
# major_label_locations.append((positions[2] + positions[-1]) / 2.0)
# major_labels.append("Rand.")
#
# # bplots = []
# #
# # for _bin, pos in zip(values, positions):
# # print "plot...", pos
# # bplots.append(ax.boxplot(x=_bin,
# # positions=[pos],
# # widths=[0.5],
# # patch_artist=True))
#
# bplots = ax.boxplot(x=values,
# positions=positions,
# widths=[0.5] * len(positions),
# patch_artist=True,
# notch=True,
# bootstrap=10000)
#
# for i in range(len(bplots)):
# color = colors[i]
# bplots['boxes'][i].set_edgecolor(color)
# bplots['boxes'][i].set_facecolor(
# matplotlib.colors.to_rgba(color, alpha=0.3)
# )
#
# for keyword in ["medians", "fliers", "whiskers", "caps"]:
# if keyword == "whiskers" or keyword == "caps":
# bplots[keyword][i * 2].set_color(color)
# bplots[keyword][i * 2 + 1].set_color(color)
# else:
# bplots[keyword][i].set_color(color)
# if keyword == "fliers":
# bplots[keyword][i].set(
# marker='o',
# markeredgecolor=matplotlib.colors.to_rgba(color, alpha=0.15),
# )
#
# ax.set_ylim(y_min, y_max)
#
# for k in range(len(minor_label_locations)):
# ax.text(x=minor_label_locations[k], y=y_min - 11, s=minor_labels[k], horizontalalignment='center',
# fontdict={'fontsize': 14})
#
# for k in range(len(major_label_locations)):
# ax.text(x=major_label_locations[k], y=y_min - 21, s=major_labels[k], horizontalalignment='center',
# fontdict={'fontsize': 14})
#
# ax.set_xticks([])
#
# ax.set_yticks([x * 10 for x in range(1, 10, 2)], minor=True)
#
# ax.grid(True, which="major", linestyle="-")
# ax.grid(True, which="minor", linestyle=":")
#
# ax.set_title("ViNE", fontsize=15.5)
#
# ax.set_ylabel("Relative Performance [%]", fontsize=14)
#
# # RAND ROUND!
#
# ax = axs[1]
#
# rr_no_recomp = [(treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.RANDOM),
# (treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.STATIC_REQ_PROFIT),
# (treewidth_model.LPRecomputationMode.NONE, treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT)]
# rr_recomp = [(treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
# treewidth_model.RoundingOrder.RANDOM),
# (treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
# treewidth_model.RoundingOrder.STATIC_REQ_PROFIT),
# (treewidth_model.LPRecomputationMode.RECOMPUTATION_WITHOUT_SEPARATION,
# treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT)]
#
# ordered_rr_settings = [rr_no_recomp, rr_recomp]
#
# positions = []
# values = []
#
# minor_labels = []
# minor_label_locations = []
#
# major_labels = []
# major_label_locations = []
# current_pos = 0.5
#
# colors = []
#
# fig.subplots_adjust(bottom=0.18, top=0.84, right=0.83, wspace=0.12)
#
# # rand round
# for i in range(2):
# # i == 0: no_recomp
# # i == 1: recomp!
# for rr_settings in ordered_rr_settings[i]:
# for j in range(2):
# current_values = [plot_data_raw[rr_settings][scenario_id][j] for scenario_id in scenario_ids]
# values.append(current_values)
# positions.append(current_pos)
# current_pos += 0.75
# if j == 0:
# colors.append(color_best)
# else:
# colors.append(color_mean)
#
# if rr_settings[1] == treewidth_model.RoundingOrder.RANDOM:
# minor_labels.append("R")
# elif rr_settings[1] == treewidth_model.RoundingOrder.ACHIEVED_REQ_PROFIT:
# minor_labels.append("A")
# elif rr_settings[1] == treewidth_model.RoundingOrder.STATIC_REQ_PROFIT:
# minor_labels.append("S")
# else:
# raise ValueError()
# minor_label_locations.append((positions[-1] + positions[-2]) / 2.0)
# current_pos += 0.5
#
# if i == 0:
# major_label_locations.append(np.mean(positions))
# major_labels.append("No Recomp.")
# current_pos += 1
# else:
# major_label_locations.append((positions[6] + positions[-1]) / 2.0)
# major_labels.append("Recomp.")
#
# bplots = ax.boxplot(x=values,
# positions=positions,
# widths=[0.5] * len(positions),
# patch_artist=True,
# notch=True,
# bootstrap=1000)
#
# print bplots
# print colors
#
# for i in range(len(positions)):
# print "Setting color of boxplot ", i
# color = colors[i]
# bplots['boxes'][i].set_edgecolor(color)
# bplots['boxes'][i].set_facecolor(
# matplotlib.colors.to_rgba(color, alpha=0.3)
# )
#
# for keyword in ["medians", "fliers", "whiskers", "caps"]:
# if keyword == "whiskers" or keyword == "caps":
# bplots[keyword][i * 2].set_color(color)
# bplots[keyword][i * 2 + 1].set_color(color)
# else:
# bplots[keyword][i].set_color(color)
# if keyword == "fliers":
# bplots[keyword][i].set(
# marker='o',
# markeredgecolor=matplotlib.colors.to_rgba(color, alpha=0.15),
# )
#
# ax.set_ylim(y_min, y_max)
#
# for k in range(len(minor_label_locations)):
# ax.text(x=minor_label_locations[k], y=y_min - 11, s=minor_labels[k], horizontalalignment='center',
# fontdict={'fontsize': 14})
#
# for k in range(len(major_label_locations)):
# ax.text(x=major_label_locations[k], y=y_min - 21, s=major_labels[k], horizontalalignment='center',
# fontdict={'fontsize': 14})
#
# ax.set_xticks([])
#
# ax.set_title("RR Heuristics", fontsize=15.5)
#
# ax.set_yticks([x * 10 for x in range(1, 10, 2)], minor=True)
#
# ax.grid(True, which="major", linestyle="-")
# ax.grid(True, which="minor", linestyle=":")
#
# # LEGEND!
#
# best_patch = mpatches.Patch(color=matplotlib.colors.to_rgba(color_best, alpha=0.6), label='best')
# mean_patch = mpatches.Patch(color=matplotlib.colors.to_rgba(color_mean, alpha=0.6), label='mean')
#
# plt.legend(handles=[best_patch, mean_patch], loc=4, fontsize=14, handlelength=0.5,
# handletextpad=0.35, bbox_to_anchor=(1, 0.5), bbox_transform=plt.gcf().transFigure,
# borderaxespad=0.175, borderpad=0.2)
#
# plt.suptitle("Performance of Algorithm Variants", fontsize=17)
#
# self._show_and_or_save_plots(output_path, filename, perform_tight_layout=False)
def plot_profit_ecdf_old(self, filter_specifications):
    """Create and store an ECDF plot of relative profits ("old" variant).

    One ECDF curve is drawn per combination of edge resource factor (ERF)
    and number of requests: the color encodes the ERF while the linestyle
    encodes the request count. Two legends are placed (one for ERF colors,
    one for request-count linestyles) and the figure is written via
    self._show_and_or_save_plots.

    Generation is skipped if the output file already exists (and
    overwriting is disabled) or if a filter on 'number_of_requests' is
    given, since that conflicts with the per-request-count curves.

    :param filter_specifications: list of filter dicts (each with a
        "parameter" key) used to select the scenarios to plot
    :return: None
    """
    output_filename = "ECDF_profit"
    output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                     filter_specifications)
    logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
    if not self.overwrite_existing_files and os.path.exists(filename):
        logger.info("Skipping generation of {} as this file already exists".format(filename))
        return
    # a filter on the request count would collapse the per-request-count curves
    if filter_specifications:
        for filter_specification in filter_specifications:
            if filter_specification["parameter"] == "number_of_requests":
                logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(
                    output_filename, filter_specification))
                return
    scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
    if self.forbidden_scenario_ids:
        scenario_ids = scenario_ids - self.forbidden_scenario_ids
    result = self.compute_relative_profits_arrays(scenario_ids)
    print result
    # NOTE(review): 'fix' is presumably a typo for 'fig'; the figure handle is never used afterwards
    fix, ax = plt.subplots(figsize=FIGSIZE)
    # ax.set_xscale("log", basex=10)
    colors_erf = ['k', 'g', 'b', 'r', 'y']  # one color per edge resource factor
    max_observed_value = 0
    linestyles = [":", "-.", "--", "-"]  # one linestyle per number-of-requests value
    number_requests_legend_handlers = []
    erf_legend_handlers = []
    for i, erf in enumerate(self._edge_rfs_list):
        previous_slice = None  # NOTE(review): never read; looks like leftover state
        for j, number_of_requests in enumerate(self._number_of_requests_list):
            result_slice = result[erf][number_of_requests]
            # drop NaNs and sort ascending: the sorted values form the ECDF support
            sorted_data = np.sort(result_slice[~np.isnan(result_slice)])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            # empirical CDF values: k/n for the k-th smallest observation
            yvals = np.arange(1, len(sorted_data) + 1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=colors_erf[i], linestyle=linestyles[j],
                    label="{} {}".format(erf, number_of_requests), linewidth=1.8)
            if i == 0:
                # build linestyle legend proxies only once (gray, independent of ERF color)
                number_requests_legend_handlers.append(
                    matplotlib.lines.Line2D([], [], color='gray', linestyle=linestyles[j],
                                            label='|req|: {}'.format(number_of_requests)))
        erf_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors_erf[i], linestyle="-",
                                                           label='ERF: {}'.format(erf)))
    first_legend = plt.legend(title="", handles=erf_legend_handlers, loc=(0.225, 0.0125), fontsize=14,
                              handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
    plt.setp(first_legend.get_title(), fontsize='15')
    # re-add the first legend as an artist so the second plt.legend call does not replace it
    plt.gca().add_artist(first_legend)
    o_leg = plt.legend(handles=number_requests_legend_handlers, loc=4, title="#Requests", fontsize=14,
                       handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
    plt.setp(o_leg.get_title(), fontsize='15')
    # NOTE(review): "FOO" looks like a placeholder title left in by accident
    ax.set_title("FOO", fontsize=17)
    # NOTE(review): label text "rel profit$)" appears garbled (stray "$)") — confirm intended label
    ax.set_xlabel("rel profit$)", fontsize=16)
    ax.set_ylabel("ECDF", fontsize=16)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(15.5)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(15.5)
    # ax.set_xticks([ 1, 1.5, 2, 2.5, 3, 3.5], minor=False)
    # ax.set_xticks([0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5], minor=True)
    # ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
    # ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    # ax.set_xticklabels([], minor=True)
    ax.grid(True, which="both", linestyle=":")
    # gridlines = ax.get_xgridlines() + ax.get_ygridlines()
    # for line in gridlines:
    #     line.set_linestyle(':')
    self._show_and_or_save_plots(output_path, filename)
def evaluate_vine_and_randround(dc_vine,
                                vine_algorithm_id,
                                vine_execution_id,
                                dc_randround_seplp_dynvmp,
                                randround_seplp_algorithm_id,
                                randround_seplp_execution_id,
                                exclude_generation_parameters=None,
                                parameter_filter_keys=None,
                                show_plot=False,
                                save_plot=True,
                                overwrite_existing_files=True,
                                forbidden_scenario_ids=None,
                                papermode=True,
                                maxdepthfilter=2,
                                output_path="./",
                                output_filetype="png",
                                request_sets=None):
    """ Main function for evaluation, creating plots and saving them in a specific directory hierarchy.

    A large variety of plots is created. For heatmaps, a generic plotter is used while for general
    comparison plots (ECDF and scatter) an own class is used. The plots that shall be generated cannot
    be controlled at the moment but the respective plotters can be easily adjusted.

    Four plotters are instantiated (ViNE heatmap, RandRound heatmap, a
    comparison heatmap, and an ECDF/box-plot comparison) and each is run
    once per filter specification.

    :param dc_vine: unpickled datacontainer of vine experiments
    :param vine_algorithm_id: algorithm id of the vine algorithm
    :param vine_execution_id: execution config (numeric) of the vine algorithm execution
    :param dc_randround_seplp_dynvmp: unpickled datacontainer of randomized rounding experiments
    :param randround_seplp_algorithm_id: algorithm id of the randround algorithm
    :param randround_seplp_execution_id: execution config (numeric) of the randround algorithm execution
    :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
                                          These won't show in the plots and will also not be shown on axis labels etc.
    :param parameter_filter_keys: name of parameters according to which the results shall be filtered
    :param show_plot: Boolean: shall plots be shown
    :param save_plot: Boolean: shall the plots be saved
    :param overwrite_existing_files: shall existing files be overwritten?
    :param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation
    :param papermode: nicely layouted plots (papermode) or rather additional information?
    :param maxdepthfilter: length of filter permutations that shall be considered
    :param output_path: path to which the results shall be written
    :param output_filetype: filetype supported by matplotlib to export figures
    :param request_sets: passed through to the ECDF/box-plot comparison plotter
                         (semantics defined by ComparisonPlotter_ECDF_BoxPlot)
    :return: None
    """
    if forbidden_scenario_ids is None:
        forbidden_scenario_ids = set()
    if exclude_generation_parameters is not None:
        # Python 2 dict iteration (iteritems); excluded values are translated into
        # forbidden scenario ids and pruned from both containers' parameter rooms.
        for key, values_to_exclude in exclude_generation_parameters.iteritems():
            parameter_filter_path, parameter_values = extract_parameter_range(
                dc_vine.scenario_parameter_container.scenarioparameter_room, key)
            parameter_dicts_vine = lookup_scenario_parameter_room_dicts_on_path(
                dc_vine.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            parameter_dicts_randround = lookup_scenario_parameter_room_dicts_on_path(
                dc_randround_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            for value_to_exclude in values_to_exclude:
                if value_to_exclude not in parameter_values:
                    raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
                        value_to_exclude, parameter_values, key
                    ))
                # add respective scenario ids to the set of forbidden scenario ids
                forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
                    dc_vine.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))
            # remove the respective values from the scenario parameter room such that these are not considered when
            # constructing e.g. axes
            parameter_dicts_vine[-1][key] = [value for value in parameter_dicts_vine[-1][key] if
                                             value not in values_to_exclude]
            parameter_dicts_randround[-1][key] = [value for value in parameter_dicts_randround[-1][key] if
                                                  value not in values_to_exclude]
    if parameter_filter_keys is not None:
        filter_specs = _construct_filter_specs(dc_vine.scenario_parameter_container.scenarioparameter_room,
                                               parameter_filter_keys,
                                               maxdepth=maxdepthfilter)
    else:
        # a single None entry means: run every plotter once, unfiltered
        filter_specs = [None]
    plotters = []
    # initialize plotters for each valid vine setting...
    vine_plotter = SingleHeatmapPlotter(output_path=output_path,
                                        output_filetype=output_filetype,
                                        scenario_solution_storage=dc_vine,
                                        algorithm_id=vine_algorithm_id,
                                        execution_id=vine_execution_id,
                                        heatmap_plot_type=HeatmapPlotType.ViNE,
                                        show_plot=show_plot,
                                        save_plot=save_plot,
                                        overwrite_existing_files=overwrite_existing_files,
                                        forbidden_scenario_ids=forbidden_scenario_ids,
                                        paper_mode=papermode)
    plotters.append(vine_plotter)
    randround_plotter = SingleHeatmapPlotter(output_path=output_path,
                                             output_filetype=output_filetype,
                                             scenario_solution_storage=dc_randround_seplp_dynvmp,
                                             algorithm_id=randround_seplp_algorithm_id,
                                             execution_id=randround_seplp_execution_id,
                                             heatmap_plot_type=HeatmapPlotType.RandRoundSepLPDynVMP,
                                             show_plot=show_plot,
                                             save_plot=save_plot,
                                             overwrite_existing_files=overwrite_existing_files,
                                             forbidden_scenario_ids=forbidden_scenario_ids,
                                             paper_mode=papermode)
    plotters.append(randround_plotter)
    comparison_plotter = ComparisonHeatmapPlotter(output_path=output_path,
                                                  output_filetype=output_filetype,
                                                  vine_solution_storage=dc_vine,
                                                  vine_algorithm_id=vine_algorithm_id,
                                                  vine_execution_id=vine_execution_id,
                                                  randround_scenario_solution_storage=dc_randround_seplp_dynvmp,
                                                  randround_algorithm_id=randround_seplp_algorithm_id,
                                                  randround_execution_id=randround_seplp_execution_id,
                                                  heatmap_plot_type=HeatmapPlotType.ComparisonVineRandRound,
                                                  show_plot=show_plot,
                                                  save_plot=save_plot,
                                                  overwrite_existing_files=overwrite_existing_files,
                                                  forbidden_scenario_ids=forbidden_scenario_ids,
                                                  paper_mode=papermode)
    plotters.append(comparison_plotter)
    ecdf_plotter = ComparisonPlotter_ECDF_BoxPlot(output_path=output_path,
                                                  output_filetype=output_filetype,
                                                  vine_solution_storage=dc_vine,
                                                  vine_algorithm_id=vine_algorithm_id,
                                                  vine_execution_id=vine_execution_id,
                                                  randround_solution_storage=dc_randround_seplp_dynvmp,
                                                  randround_algorithm_id=randround_seplp_algorithm_id,
                                                  randround_execution_id=randround_seplp_execution_id,
                                                  show_plot=show_plot,
                                                  save_plot=save_plot,
                                                  overwrite_existing_files=overwrite_existing_files,
                                                  forbidden_scenario_ids=forbidden_scenario_ids,
                                                  paper_mode=papermode,
                                                  request_sets=request_sets)
    plotters.append(ecdf_plotter)
    # run every plotter once per filter specification
    for filter_spec in filter_specs:
        for plotter in plotters:
            plotter.plot_figure(filter_spec)
def evaluate_latency_and_baseline(dc_baseline,
                                  dc_with_latencies,
                                  algorithm_id,
                                  exclude_generation_parameters=None,
                                  parameter_filter_keys=None,
                                  show_plot=False,
                                  save_plot=True,
                                  overwrite_existing_files=True,
                                  forbidden_scenario_ids=None,
                                  papermode=True,
                                  maxdepthfilter=10,
                                  output_path="./",
                                  output_filetype="png",
                                  filter_type=None,
                                  filter_exec_params=None):
    """ Main function for the latency-study evaluation, creating plots and saving them in a specific directory hierarchy.

    Two LatencyStudyPlotter instances are created — one plotting the
    with-latencies results on their own and one comparing them against the
    baseline — and each is run once per filter specification.

    :param dc_baseline: unpickled datacontainer of the baseline (no-latency) experiments
    :param dc_with_latencies: unpickled datacontainer of the experiments with latencies
    :param algorithm_id: algorithm id shared by both experiment sets
    :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
                                          These won't show in the plots and will also not be shown on axis labels etc.
    :param parameter_filter_keys: name of parameters according to which the results shall be filtered
    :param show_plot: Boolean: shall plots be shown
    :param save_plot: Boolean: shall the plots be saved
    :param overwrite_existing_files: shall existing files be overwritten?
    :param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation
    :param papermode: nicely layouted plots (papermode) or rather additional information?
    :param maxdepthfilter: length of filter permutations that shall be considered
    :param output_path: path to which the results shall be written
    :param output_filetype: filetype supported by matplotlib to export figures
    :param filter_type: passed through to LatencyStudyPlotter (semantics defined there)
    :param filter_exec_params: passed through to LatencyStudyPlotter (semantics defined there)
    :return: None
    """
    if forbidden_scenario_ids is None:
        forbidden_scenario_ids = set()
    if exclude_generation_parameters is not None:
        # Python 2 dict iteration (iteritems); excluded values are translated into
        # forbidden scenario ids and pruned from both containers' parameter rooms.
        for key, values_to_exclude in exclude_generation_parameters.iteritems():
            parameter_filter_path, parameter_values = extract_parameter_range(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, key)
            parameter_dicts_vine = lookup_scenario_parameter_room_dicts_on_path(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            parameter_dicts_randround = lookup_scenario_parameter_room_dicts_on_path(
                dc_with_latencies.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            for value_to_exclude in values_to_exclude:
                if value_to_exclude not in parameter_values:
                    raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
                        value_to_exclude, parameter_values, key
                    ))
                # add respective scenario ids to the set of forbidden scenario ids
                forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
                    dc_baseline.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))
            # remove the respective values from the scenario parameter room such that these are not considered when
            # constructing e.g. axes
            parameter_dicts_vine[-1][key] = [value for value in parameter_dicts_vine[-1][key] if
                                             value not in values_to_exclude]
            parameter_dicts_randround[-1][key] = [value for value in parameter_dicts_randround[-1][key] if
                                                  value not in values_to_exclude]
    if parameter_filter_keys is not None:
        filter_specs = _construct_filter_specs(dc_with_latencies.scenario_parameter_container.scenarioparameter_room,
                                               parameter_filter_keys,
                                               maxdepth=maxdepthfilter)
    else:
        # a single None entry means: run every plotter once, unfiltered
        filter_specs = [None]
    plotters = []
    # initialize plotters for each valid vine setting...
    randround_plotter = LatencyStudyPlotter(output_path=output_path,
                                            output_filetype=output_filetype,
                                            baseline_solution_storage=dc_baseline,
                                            algorithm_id=algorithm_id,
                                            with_latencies_solution_storage=dc_with_latencies,
                                            heatmap_plot_type=HeatmapPlotType.LatencyStudy,
                                            filter_type=filter_type,
                                            filter_exec_params=filter_exec_params,
                                            list_of_axes_specifications=global_heatmap_axes_specifications_latency_study,
                                            show_plot=show_plot,
                                            save_plot=save_plot,
                                            overwrite_existing_files=overwrite_existing_files,
                                            forbidden_scenario_ids=forbidden_scenario_ids,
                                            paper_mode=papermode)
    plotters.append(randround_plotter)
    comparison_plotter = LatencyStudyPlotter(output_path=output_path,
                                             output_filetype=output_filetype,
                                             baseline_solution_storage=dc_baseline,
                                             algorithm_id=algorithm_id,
                                             comparison=True,
                                             with_latencies_solution_storage=dc_with_latencies,
                                             heatmap_plot_type=HeatmapPlotType.ComparisonLatencyBaseline,
                                             filter_type=filter_type,
                                             filter_exec_params=filter_exec_params,
                                             list_of_axes_specifications=global_heatmap_axes_specifications_latency_study_comparison,
                                             show_plot=show_plot,
                                             save_plot=save_plot,
                                             overwrite_existing_files=overwrite_existing_files,
                                             forbidden_scenario_ids=forbidden_scenario_ids,
                                             paper_mode=papermode)
    plotters.append(comparison_plotter)
    # run every plotter once per filter specification
    for filter_spec in filter_specs:
        for plotter in plotters:
            plotter.plot_figure(filter_spec)
def iterate_algorithm_sub_parameters(plot_type):
    """Yield all algorithm sub-parameter combinations for the given heatmap plot type.

    For ViNE plots, a ViNE settings object is yielded for every combination of
    edge embedding model, LP objective, and rounding procedure. For randomized
    rounding plots, the raw (LP recomputation mode, rounding order) tuple is
    yielded. Any other plot type yields nothing.
    """
    if plot_type == HeatmapPlotType.RandRoundSepLPDynVMP:
        # Randomized rounding: yield each (recomputation mode, rounding order) pair as-is.
        for combination in itertools.product(treewidth_model.LPRecomputationMode,
                                             treewidth_model.RoundingOrder):
            yield combination
    elif plot_type == HeatmapPlotType.ViNE:
        # ViNE: build a settings object for each combination via the factory.
        for model, objective, procedure in itertools.product(vine.ViNEEdgeEmbeddingModel,
                                                             vine.ViNELPObjective,
                                                             vine.ViNERoundingProcedure):
            yield vine.ViNESettingsFactory.get_vine_settings(
                edge_embedding_model=model,
                lp_objective=objective,
                rounding_procedure=procedure,
            )
| 50.618056
| 5,393
| 0.603435
| 19,786
| 167,647
| 4.800566
| 0.04857
| 0.023499
| 0.003737
| 0.002906
| 0.809378
| 0.770182
| 0.720742
| 0.687684
| 0.663996
| 0.64533
| 0
| 0.043865
| 0.301324
| 167,647
| 3,311
| 5,394
| 50.633343
| 0.767056
| 0.161178
| 0
| 0.536562
| 0
| 0.004358
| 0.144101
| 0.023727
| 0
| 0
| 0
| 0.000302
| 0.000484
| 0
| null | null | 0
| 0.010654
| null | null | 0.012107
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3bde6e55a22de6dab2773d92f66866d3b8d86d5a
| 26
|
py
|
Python
|
empiriciSN/__init__.py
|
tholoien/empiricisn
|
ef4d6a77cea5875badab0cb6404fda259e35864a
|
[
"MIT"
] | 2
|
2016-09-18T22:40:38.000Z
|
2020-02-05T17:43:50.000Z
|
empiriciSN/__init__.py
|
tholoien/empiricisn
|
ef4d6a77cea5875badab0cb6404fda259e35864a
|
[
"MIT"
] | 26
|
2016-06-14T18:00:37.000Z
|
2019-08-20T15:58:22.000Z
|
empiriciSN/__init__.py
|
tholoien/empiricisn
|
ef4d6a77cea5875badab0cb6404fda259e35864a
|
[
"MIT"
] | 4
|
2016-06-15T01:24:08.000Z
|
2020-02-05T17:43:55.000Z
|
from .empiriciSN import *
| 13
| 25
| 0.769231
| 3
| 26
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3bed32800fe8697c72779e2fe24ad803b284d397
| 45
|
py
|
Python
|
webracer/__init__.py
|
p/webracer
|
3eb40b520bbf884c4458482fc3a05a9a9632d026
|
[
"BSD-2-Clause"
] | null | null | null |
webracer/__init__.py
|
p/webracer
|
3eb40b520bbf884c4458482fc3a05a9a9632d026
|
[
"BSD-2-Clause"
] | null | null | null |
webracer/__init__.py
|
p/webracer
|
3eb40b520bbf884c4458482fc3a05a9a9632d026
|
[
"BSD-2-Clause"
] | 1
|
2019-04-13T07:43:28.000Z
|
2019-04-13T07:43:28.000Z
|
from .agent import *
from .testcase import *
| 15
| 23
| 0.733333
| 6
| 45
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 24
| 22.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0271d41bf65a9ed3dd6a34e25ef9aea77a4027ee
| 88
|
py
|
Python
|
pylisten/__init__.py
|
deeuu/pylisten
|
3b8f9db7b7311a5a42ef7811acff284ca6854f30
|
[
"MIT"
] | null | null | null |
pylisten/__init__.py
|
deeuu/pylisten
|
3b8f9db7b7311a5a42ef7811acff284ca6854f30
|
[
"MIT"
] | null | null | null |
pylisten/__init__.py
|
deeuu/pylisten
|
3b8f9db7b7311a5a42ef7811acff284ca6854f30
|
[
"MIT"
] | null | null | null |
from . import parser
from . import correlation
from . import utils
from . import mushra
| 17.6
| 25
| 0.772727
| 12
| 88
| 5.666667
| 0.5
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 88
| 4
| 26
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a592b5f41094525ce9bcad8a3f18fc8607f9922
| 94
|
py
|
Python
|
pyobjcryst/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | null | null | null |
pyobjcryst/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | 20
|
2018-03-07T07:57:46.000Z
|
2021-12-21T19:00:18.000Z
|
pyobjcryst/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | 5
|
2018-03-07T07:57:16.000Z
|
2021-12-18T13:15:52.000Z
|
#!/usr/bin/env python
import pyobjcryst.tests
assert pyobjcryst.tests.test().wasSuccessful()
| 18.8
| 46
| 0.787234
| 12
| 94
| 6.166667
| 0.833333
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 94
| 4
| 47
| 23.5
| 0.850575
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5a608a1b36f96b2f59190fff297913276c4cb298
| 48
|
py
|
Python
|
bitmovin/services/analytics/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 44
|
2016-12-12T17:37:23.000Z
|
2021-03-03T09:48:48.000Z
|
bitmovin/services/analytics/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 38
|
2017-01-09T14:45:45.000Z
|
2022-02-27T18:04:33.000Z
|
bitmovin/services/analytics/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 27
|
2017-02-02T22:49:31.000Z
|
2019-11-21T07:04:57.000Z
|
from .analytics_service import AnalyticsService
| 24
| 47
| 0.895833
| 5
| 48
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a630282c1a0aed8f5be56a3f56a0a9db0ed226e
| 230
|
py
|
Python
|
examples/python.tornado/gen-py.tornado/v1/music/__init__.py
|
ariasheets-wk/frugal
|
81d41af7fb573c1f97afea99a1b4dfa6ccae29e8
|
[
"Apache-2.0"
] | 144
|
2017-08-17T15:51:58.000Z
|
2022-01-14T21:36:55.000Z
|
examples/python.tornado/gen-py.tornado/v1/music/__init__.py
|
ariasheets-wk/frugal
|
81d41af7fb573c1f97afea99a1b4dfa6ccae29e8
|
[
"Apache-2.0"
] | 930
|
2017-08-17T17:53:30.000Z
|
2022-03-28T14:04:49.000Z
|
examples/python.tornado/gen-py.tornado/v1/music/__init__.py
|
ariasheets-wk/frugal
|
81d41af7fb573c1f97afea99a1b4dfa6ccae29e8
|
[
"Apache-2.0"
] | 77
|
2017-08-17T15:54:31.000Z
|
2021-12-25T15:18:34.000Z
|
from .f_AlbumWinners_publisher import AlbumWinnersPublisher
from .f_AlbumWinners_subscriber import AlbumWinnersSubscriber
from .f_Store import Client as FStoreClient
from .f_Store import Iface as FStoreIface
from .ttypes import *
| 38.333333
| 61
| 0.869565
| 29
| 230
| 6.689655
| 0.517241
| 0.103093
| 0.175258
| 0.164948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 230
| 5
| 62
| 46
| 0.941748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a759f2e5211ef1e5eb7159cb79d197d4dfef91b
| 26
|
py
|
Python
|
agsadmin/rest_admin/system/__init__.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 2
|
2015-12-07T05:53:29.000Z
|
2020-09-13T18:12:15.000Z
|
agsadmin/rest_admin/system/__init__.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 4
|
2015-03-09T05:59:14.000Z
|
2018-01-09T00:12:56.000Z
|
agsadmin/rest_admin/system/__init__.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 5
|
2015-03-09T01:05:24.000Z
|
2019-09-09T23:01:21.000Z
|
from .System import System
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a85e247377fac876ca18d2eb0c5fc72d5e70bcc
| 465
|
py
|
Python
|
cgi-bin/any/logCreate.py
|
5610110083/Safety-in-residential-project
|
000a48f8c5e94f69497a40529f3540d6b1603ad1
|
[
"Apache-2.0"
] | null | null | null |
cgi-bin/any/logCreate.py
|
5610110083/Safety-in-residential-project
|
000a48f8c5e94f69497a40529f3540d6b1603ad1
|
[
"Apache-2.0"
] | null | null | null |
cgi-bin/any/logCreate.py
|
5610110083/Safety-in-residential-project
|
000a48f8c5e94f69497a40529f3540d6b1603ad1
|
[
"Apache-2.0"
] | null | null | null |
import logging

# BUG FIX: the original used 'logfile\logCreate.log', where "\l" is an
# invalid escape sequence.  CPython currently keeps the backslash literally
# but emits a SyntaxWarning (an error in future versions).  A raw string
# preserves the exact same runtime path while being well-formed.
logging.basicConfig(
    filename=r'logfile\logCreate.log',
    level=logging.DEBUG,
    format='%(asctime)s %(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p',
)
# Record the timestamp of this run; %(asctime)s supplies the "when".
logging.warning('is when this event was logged.')
# Visual confirmation banner on stdout for the CGI caller.
print("============================================================================")
print("==================== = = = == S u c c e s s == = = = =====================")
print("============================================================================")
| 51.666667
| 139
| 0.367742
| 42
| 465
| 4.071429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07957
| 465
| 8
| 140
| 58.125
| 0.399533
| 0
| 0
| 0.333333
| 0
| 0
| 0.693966
| 0.418103
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ce72e6448ab5aefd4786d8d2f758c7820903fb44
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/lib/_iotools.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/lib/_iotools.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/lib/_iotools.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/38/be/8c/9a259a6d3d7f837d188468d8acd16e068352c83087aa20e05eebbfa854
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ceaf7d9131970f03a66dc74526029683188dcde0
| 15,833
|
py
|
Python
|
pirates/leveleditor/worldData/del_fuego_area_cave_c_1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/del_fuego_area_cave_c_1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/del_fuego_area_cave_c_1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.del_fuego_area_cave_c_1
from pandac.PandaModules import Point3, VBase3, Vec4
# Machine-generated level-editor world data for the del_fuego cave_c_1 area.
# Do not hand-edit: this literal was produced by decompiling game bytecode.
# 'Objects' maps editor object ids to node definitions (spawn points, lights,
# movement nodes, effects); 'TodSettings' holds time-of-day color/fog tables;
# 'ObjectIds' maps each id back to its path within the 'Objects' tree.
objectStruct = {'Objects': {'1164929110.98sdnaik': {'Type': 'Island Game Area', 'Name': 'del_fuego_area_cave_c_1', 'File': '', 'Instanced': True, 'Minimap': False, 'Objects': {'1164930102.27sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_1', 'Hpr': VBase3(57.196, 0.0, 0.0), 'Pos': Point3(-148.822, -121.561, 26.647), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1164930102.28sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_2', 'Hpr': VBase3(178.366, 0.0, 0.0), 'Pos': Point3(162.003, -10.773, 2.083), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1176238208.0dxschafe': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'gp_chant_a', 'Hpr': VBase3(137.437, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(24.46, 39.593, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T5', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1176238208.0dxschafe0': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'gp_chant_b', 'Hpr': VBase3(8.215, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(16.455, 18.41, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T5', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1176238208.0dxschafe1': {'Type': 'Player Spawn Node', 'Hpr': VBase3(85.413, 0.0, 0.0), 'Index': -1, 'Pos': Point3(69.237, 2.171, 0.069), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'Visual': {'Color': (0.5, 0.5, 0.5, 1), 'Model': 'models/misc/smiley'}}, '1176239104.0dxschafe': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'gp_searching', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': 
'1', 'Patrol Radius': '7.7530', 'Pause Chance': '23', 'Pause Duration': '97', 'Pos': Point3(-111.72, -53.65, 24.74), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Dread Scorpion', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1188602752.0dxschafe': {'Type': 'Player Spawn Node', 'Hpr': VBase3(85.413, 0.0, 0.0), 'Index': -1, 'Pos': Point3(79.135, 58.949, 0.069), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'VisSize': '', 'Visual': {'Color': (0.5, 0.5, 0.5, 1), 'Model': 'models/misc/smiley'}}, '1188602752.0dxschafe0': {'Type': 'Player Spawn Node', 'Hpr': VBase3(85.413, 0.0, 0.0), 'Index': -1, 'Pos': Point3(110.414, -27.524, 0.077), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'Visual': {'Color': (0.5, 0.5, 0.5, 1), 'Model': 'models/misc/smiley'}}, '1189033600.0dchiappe': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': '0.0964', 'Flickering': False, 'Hpr': VBase3(0.0, 1.968, 0.0), 'Intensity': '1.3735', 'LightType': 'POINT', 'Pos': Point3(-60.328, -27.47, 65.808), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1.0, 0.2, 0.0, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1189033600.0dchiappe0': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': '0.5000', 'Flickering': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Intensity': '1.0120', 'LightType': 'POINT', 'Pos': Point3(107.938, 64.026, 31.319), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1.0, 0.36, 0.39, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1189033728.0dchiappe': {'Type': 'Effect Node', 'EffectName': 'bonfire_effect', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(16.178, 31.787, 0.0), 'Scale': VBase3(0.641, 0.641, 0.641), 'Visual': {'Color': (0.0, 1.0, 0.0, 1.0), 'Model': 'models/misc/smiley'}}, 
'1189033856.0dchiappe': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'gp_summon', 'Hpr': VBase3(-96.39, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(5.546, 33.646, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T5', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1189637504.0dxschafe': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '10', 'Pause Duration': '5', 'Pos': Point3(-115.353, -4.892, 25.003), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189637504.0dxschafe0': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '19', 'Pause Duration': '5', 'Pos': Point3(-85.201, 22.347, 24.947), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189637504.0dxschafe1': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '74', 'Pause Duration': '5', 'Pos': Point3(-77.619, 64.292, 15.37), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1245456238.69piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(-172.875, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(32.393, 86.825, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Bandido', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456279.94piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(159.444, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 
100, 'Pause Duration': 30, 'Pos': Point3(59.63, 84.278, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Bandido', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456317.19piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(63.435, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(108.667, -80.491, 0.073), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T6', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456422.86piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(-91.548, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-111.266, 59.542, 16.376), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Dread Scorpion', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456456.48piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(72.582, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(135.843, 43.604, 0.071), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Dread Scorpion', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456481.61piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(165.964, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 
100, 'Pause Duration': 30, 'Pos': Point3(108.856, 87.371, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Pirata', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456495.84piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(126.87, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(123.547, 76.181, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Pirata', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456520.91piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(26.565, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(105.192, -121.129, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T6', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456614.55piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(-48.857, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '7.0663', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-24.892, -76.399, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Bandido', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456628.66piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(-92.862, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '6.7229', 
'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-30.091, -59.001, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Spanish Undead Pirata', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245456780.69piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(332.008, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(57.006, -70.842, 0.076), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T6', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1245457393.06piwanow': {'Type': 'Spawn Node', 'AnimSet': 'default', 'Hpr': VBase3(-69.057, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '8.4398', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-34.254, -6.97, 0.069), 'PoseAnim': '', 'PoseFrame': '', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skel T5', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}}, 'Visibility': 'Grid', 'Visual': {'Model': 'models/caves/cave_c_zero'}}}, 'TodSettings': {'AmbientColors': {0: Vec4(0.45, 0.53, 0.65, 1), 2: Vec4(1, 1, 1, 1), 4: Vec4(0.4, 0.45, 0.5, 1), 6: Vec4(0.44, 0.45, 0.56, 1), 8: Vec4(0.39, 0.42, 0.54, 1), 12: Vec4(0.34, 0.28, 0.41, 1), 13: Vec4(0.34, 0.28, 0.41, 1), 16: Vec4(0.25, 0.25, 0.25, 1)}, 'DirectionalColors': {0: Vec4(0.55, 0.46, 0.35, 1), 2: Vec4(1, 1, 1, 1), 4: Vec4(0.6, 0.34, 0.1, 1), 6: Vec4(0.46, 0.48, 0.45, 1), 8: Vec4(0.42, 0.42, 0.4, 1), 12: Vec4(0.66, 0.76, 0.05, 1), 13: Vec4(0.66, 0.76, 0.05, 1), 16: Vec4(0, 0, 0, 1)}, 'FogColors': {0: Vec4(0.3, 0.2, 0.15, 0), 2: Vec4(0.6, 0.694118, 0.894118, 
1), 4: Vec4(0.3, 0.18, 0.15, 0), 6: Vec4(0.15, 0.2, 0.35, 0), 8: Vec4(0.05, 0.06, 0.17, 0), 12: Vec4(0.1, 0.12, 0.03, 0), 13: Vec4(0.1, 0.12, 0.03, 0), 16: Vec4(0.25, 0.25, 0.25, 1)}, 'FogRanges': {0: 0.0001, 2: 9.999999747378752e-05, 4: 0.0001, 6: 0.0001, 8: 0.0002, 12: 0.00025, 13: 0.00025, 16: 0.0001}, 'LinearFogRanges': {0: (0.0, 100.0), 2: (0.0, 100.0), 4: (0.0, 100.0), 6: (0.0, 100.0), 8: (0.0, 100.0), 12: (0.0, 100.0), 13: (0.0, 100.0), 16: (0.0, 100.0)}}, 'Node Links': [['1189637504.0dxschafe', '1176239104.0dxschafe', 'Bi-directional'], ['1189637504.0dxschafe0', '1189637504.0dxschafe', 'Bi-directional'], ['1189637504.0dxschafe0', '1189637504.0dxschafe1', 'Bi-directional']], 'Layers': {}, 'ObjectIds': {'1164929110.98sdnaik': '["Objects"]["1164929110.98sdnaik"]', '1164930102.27sdnaik': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1164930102.27sdnaik"]', '1164930102.28sdnaik': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1164930102.28sdnaik"]', '1176238208.0dxschafe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1176238208.0dxschafe"]', '1176238208.0dxschafe0': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1176238208.0dxschafe0"]', '1176238208.0dxschafe1': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1176238208.0dxschafe1"]', '1176239104.0dxschafe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1176239104.0dxschafe"]', '1188602752.0dxschafe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1188602752.0dxschafe"]', '1188602752.0dxschafe0': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1188602752.0dxschafe0"]', '1189033600.0dchiappe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189033600.0dchiappe"]', '1189033600.0dchiappe0': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189033600.0dchiappe0"]', '1189033728.0dchiappe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189033728.0dchiappe"]', '1189033856.0dchiappe': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189033856.0dchiappe"]', '1189637504.0dxschafe': 
'["Objects"]["1164929110.98sdnaik"]["Objects"]["1189637504.0dxschafe"]', '1189637504.0dxschafe0': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189637504.0dxschafe0"]', '1189637504.0dxschafe1': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1189637504.0dxschafe1"]', '1245456238.69piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456238.69piwanow"]', '1245456279.94piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456279.94piwanow"]', '1245456317.19piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456317.19piwanow"]', '1245456422.86piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456422.86piwanow"]', '1245456456.48piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456456.48piwanow"]', '1245456481.61piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456481.61piwanow"]', '1245456495.84piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456495.84piwanow"]', '1245456520.91piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456520.91piwanow"]', '1245456614.55piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456614.55piwanow"]', '1245456628.66piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456628.66piwanow"]', '1245456780.69piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245456780.69piwanow"]', '1245457393.06piwanow': '["Objects"]["1164929110.98sdnaik"]["Objects"]["1245457393.06piwanow"]'}}
# Editor camera/view state saved with the level (not part of the world tree).
extraInfo = {'camPos': Point3(619.568, 288.857, 661.517), 'camHpr': VBase3(116.607, -43.8887, 0), 'focalLength': 1.39999997616, 'skyState': 2, 'fog': 0}
| 2,261.857143
| 15,389
| 0.620097
| 2,293
| 15,833
| 4.269952
| 0.158744
| 0.029823
| 0.025432
| 0.021244
| 0.669288
| 0.581044
| 0.551629
| 0.514861
| 0.475539
| 0.457869
| 0
| 0.223762
| 0.097328
| 15,833
| 7
| 15,390
| 2,261.857143
| 0.461307
| 0.014463
| 0
| 0
| 0
| 0
| 0.532692
| 0.148654
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ceb5e76d0143710c5d4c893218f92a4182cc221f
| 113,968
|
py
|
Python
|
idaes/models/unit_models/tests/test_separator.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/models/unit_models/tests/test_separator.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/models/unit_models/tests/test_separator.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | 1
|
2022-03-17T11:08:43.000Z
|
2022-03-17T11:08:43.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Separator unit model.
Author: Andrew Lee
"""
import pytest
from pyomo.environ import (
check_optimal_termination,
ConcreteModel,
Constraint,
Set,
value,
Var,
units as pyunits,
)
from pyomo.network import Port
from pyomo.common.config import ConfigBlock
from pyomo.util.check_units import assert_units_consistent
from idaes.core import (
FlowsheetBlock,
declare_process_block_class,
MaterialBalanceType,
StateBlockData,
StateBlock,
PhysicalParameterBlock,
Phase,
Component,
)
from idaes.models.unit_models.separator import (
Separator,
SeparatorData,
SplittingType,
EnergySplittingType,
)
from idaes.core.util.exceptions import (
BurntToast,
ConfigurationError,
InitializationError,
)
from idaes.models.properties.examples.saponification_thermo import (
SaponificationParameterBlock,
)
from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import (
BTXParameterBlock,
)
from idaes.models.properties import iapws95
from idaes.core.util.model_statistics import (
degrees_of_freedom,
number_variables,
number_total_constraints,
number_unused_variables,
)
from idaes.core.util.testing import (
PhysicalParameterTestBlock,
TestStateBlock,
initialization_tester,
)
from idaes.core.solvers import get_solver
import idaes.core.util.scaling as iscale
# -----------------------------------------------------------------------------
# Get default solver for testing
# Module-level solver instance shared by every test in this file.
solver = get_solver()
# -----------------------------------------------------------------------------
# Mockup classes for testing
@declare_process_block_class("SeparatorFrame")
class SeparatorFrameData(SeparatorData):
    """Partially-constructed Separator used to unit-test construction steps.

    ``build`` deliberately skips ``SeparatorData.build`` so the tests below
    can invoke each construction helper method individually.
    """

    def build(self):
        # super(SeparatorData, self) starts the MRO lookup *above*
        # SeparatorData, so SeparatorData.build itself is intentionally
        # NOT executed here — only its base class's build runs.
        super(SeparatorData, self).build()
# -----------------------------------------------------------------------------
# Tests of Separator unit model construction methods
@pytest.mark.build
class TestBaseConstruction(object):
    """Unit tests for the individual construction steps of Separator.

    Each test drives one helper method of the (deliberately unbuilt)
    SeparatorFrame and checks its effect on the model in isolation.
    """

    @pytest.fixture(scope="function")
    def build(self):
        # Fresh model per test: flowsheet + test property package + bare frame.
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.pp = PhysicalParameterTestBlock()
        m.fs.sep = SeparatorFrame(default={"property_package": m.fs.pp})
        return m

    @pytest.mark.unit
    def test_separator_config(self, build):
        """Every config entry should have its documented default value."""
        assert len(build.fs.sep.config) == 14
        assert build.fs.sep.config.dynamic is False
        assert build.fs.sep.config.has_holdup is False
        assert build.fs.sep.config.property_package == build.fs.pp
        assert isinstance(build.fs.sep.config.property_package_args, ConfigBlock)
        assert len(build.fs.sep.config.property_package_args) == 0
        assert build.fs.sep.config.outlet_list is None
        assert build.fs.sep.config.num_outlets is None
        assert build.fs.sep.config.split_basis == SplittingType.totalFlow
        assert build.fs.sep.config.ideal_separation is False
        assert build.fs.sep.config.ideal_split_map is None
        assert build.fs.sep.config.mixed_state_block is None
        assert build.fs.sep.config.construct_ports is True
        assert (
            build.fs.sep.config.material_balance_type == MaterialBalanceType.useDefault
        )
        assert build.fs.sep.config.has_phase_equilibrium is False

    @pytest.mark.unit
    def test_validate_config_arguments(self, build):
        """Phase equilibrium combined with ideal separation must be rejected."""
        build.fs.sep.config.has_phase_equilibrium = True
        build.fs.sep.config.ideal_separation = True
        with pytest.raises(ConfigurationError):
            build.fs.sep._validate_config_arguments()

    @pytest.mark.unit
    def test_create_outlet_list_default(self, build):
        """With no arguments, two default-named outlets are created."""
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        for o in outlet_list:
            assert o in ["outlet_1", "outlet_2"]

    @pytest.mark.unit
    def test_create_outlet_list_outlet_list(self, build):
        """An explicit outlet_list should be used verbatim."""
        build.fs.sep.config.outlet_list = ["foo", "bar"]
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        for o in outlet_list:
            assert o in ["foo", "bar"]

    @pytest.mark.unit
    def test_create_outlet_list_num_outlets(self, build):
        """num_outlets alone should generate that many default-named outlets."""
        build.fs.sep.config.num_outlets = 3
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        for o in outlet_list:
            assert o in ["outlet_1", "outlet_2", "outlet_3"]

    @pytest.mark.unit
    def test_create_outlet_list_both_args_consistent(self, build):
        """outlet_list + a matching num_outlets is accepted."""
        build.fs.sep.config.outlet_list = ["foo", "bar"]
        build.fs.sep.config.num_outlets = 2
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        for o in outlet_list:
            assert o in ["foo", "bar"]

    @pytest.mark.unit
    def test_create_outlet_list_both_args_inconsistent(self, build):
        """outlet_list + a conflicting num_outlets must raise."""
        build.fs.sep.config.outlet_list = ["foo", "bar"]
        build.fs.sep.config.num_outlets = 3
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        with pytest.raises(ConfigurationError):
            build.fs.sep.create_outlet_list()

    @pytest.mark.unit
    def test_add_outlet_state_blocks(self, build):
        """One StateBlock per outlet, with non-equilibrium, undefined state."""
        build.fs.sep.config.outlet_list = ["foo", "bar"]
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        outlet_blocks = build.fs.sep.add_outlet_state_blocks(outlet_list)
        assert isinstance(build.fs.sep.foo_state, StateBlock)
        assert isinstance(build.fs.sep.bar_state, StateBlock)
        assert len(outlet_blocks) == 2
        for o in outlet_blocks:
            assert isinstance(o, StateBlock)
            assert o.local_name in ["foo_state", "bar_state"]
            assert o[0].config.has_phase_equilibrium is False
            assert o[0].config.defined_state is False
            assert len(o[0].config) == 3

    @pytest.mark.unit
    def test_add_outlet_state_blocks_prop_pack_args(self, build):
        """property_package_args must be forwarded into each outlet StateBlock."""
        build.fs.sep.config.property_package_args = {"test": 1}
        build.fs.sep.config.outlet_list = ["foo", "bar"]
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        outlet_list = build.fs.sep.create_outlet_list()
        outlet_blocks = build.fs.sep.add_outlet_state_blocks(outlet_list)
        assert isinstance(build.fs.sep.foo_state, StateBlock)
        assert isinstance(build.fs.sep.bar_state, StateBlock)
        assert len(outlet_blocks) == 2
        for o in outlet_blocks:
            assert isinstance(o, StateBlock)
            assert o.local_name in ["foo_state", "bar_state"]
            assert o[0].config.has_phase_equilibrium is False
            assert o[0].config.defined_state is False
            # 3 standard entries + the forwarded "test" argument
            assert len(o[0].config) == 4
            assert o[0].config.test == 1

    @pytest.mark.unit
    def test_add_mixed_state_block(self, build):
        """The internally-built mixed state is a defined (inlet-side) state."""
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        mixed_block = build.fs.sep.add_mixed_state_block()
        assert isinstance(mixed_block, StateBlock)
        assert hasattr(build.fs.sep, "mixed_state")
        assert not build.fs.sep.mixed_state[0].config.has_phase_equilibrium
        assert build.fs.sep.mixed_state[0].config.defined_state
        assert len(build.fs.sep.mixed_state[0].config) == 3

    @pytest.mark.unit
    def test_add_mixed_state_block_prop_pack_args(self, build):
        """property_package_args must also reach the mixed StateBlock."""
        build.fs.sep.config.property_package_args = {"test": 1}
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        mixed_block = build.fs.sep.add_mixed_state_block()
        assert isinstance(mixed_block, StateBlock)
        assert hasattr(build.fs.sep, "mixed_state")
        assert not build.fs.sep.mixed_state[0].config.has_phase_equilibrium
        assert build.fs.sep.mixed_state[0].config.defined_state
        assert len(build.fs.sep.mixed_state[0].config) == 4
        assert build.fs.sep.mixed_state[0].config.test == 1

    @pytest.mark.unit
    def test_get_mixed_state_block(self, build):
        """An externally-supplied mixed_state_block is returned unchanged."""
        build.fs.sb = TestStateBlock(build.fs.time, default={"parameters": build.fs.pp})
        build.fs.sep.config.mixed_state_block = build.fs.sb
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        mixed_block = build.fs.sep.get_mixed_state_block()
        assert mixed_block == build.fs.sb

    @pytest.mark.unit
    def test_get_mixed_state_block_none(self, build):
        """Asking for an external block when none was given is an internal error."""
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        with pytest.raises(BurntToast):
            build.fs.sep.get_mixed_state_block()

    @pytest.mark.unit
    def test_get_mixed_state_block_mismatch(self, build):
        """A mixed state built from a different property package must raise."""
        build.fs.sb = TestStateBlock(build.fs.time, default={"parameters": build.fs.pp})
        # Change parameters arg to create mismatch
        build.fs.sb[0].config.parameters = None
        build.fs.sep.config.mixed_state_block = build.fs.sb
        build.fs.sep._get_property_package()
        build.fs.sep._get_indexing_sets()
        with pytest.raises(ConfigurationError):
            build.fs.sep.get_mixed_state_block()
# -----------------------------------------------------------------------------
# Tests of Separator unit model scaling factors
@pytest.mark.unit
class TestBaseScaling(object):
    """Smoke tests for Separator scaling-factor calculation.

    These only verify that ``calculate_scaling_factors`` completes without
    raising; no factor values are asserted yet.
    """

    @pytest.fixture(scope="function")
    def m(self):
        # One minimal flowsheet with a test property package per test.
        model = ConcreteModel()
        model.fs = FlowsheetBlock(default={"dynamic": False})
        model.fs.pp = PhysicalParameterTestBlock()
        return model

    def test_no_exception_scaling_calc_external_mixed_state(self, m):
        """Scaling must not raise when the mixed state block is supplied externally."""
        m.fs.sb = TestStateBlock(m.fs.time, default={"parameters": m.fs.pp})
        separator_args = {"property_package": m.fs.pp, "mixed_state_block": m.fs.sb}
        m.fs.sep1 = Separator(default=separator_args)
        iscale.calculate_scaling_factors(m)

    def test_no_exception_scaling_calc_internal_mixed_state(self, m):
        """Scaling must not raise when the Separator builds its own mixed state."""
        m.fs.sep1 = Separator(default={"property_package": m.fs.pp})
        iscale.calculate_scaling_factors(m)
# -----------------------------------------------------------------------------
# Tests of Separator unit model non-ideal construction methods
@pytest.mark.build
class TestSplitConstruction(object):
    """Unit tests for the Separator's individual construction methods.

    Each test calls one ``add_*`` method on a partially-built SeparatorFrame
    and checks the Pyomo components it creates. The fixture model has a test
    property package with 2 phases and 2 components, 2 outlets, and a single
    (steady-state) time point, so expected component counts are products of
    those cardinalities.
    """
    @pytest.fixture(scope="function")
    def build(self):
        """Return a model holding a SeparatorFrame ready for ``add_*`` calls.

        The frame has its property package and indexing sets resolved, an
        outlet list and outlet state blocks created, and a mixed state block
        added — but no split fractions or balance constraints yet.
        """
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.pp = PhysicalParameterTestBlock()
        m.fs.sep = SeparatorFrame(default={"property_package": m.fs.pp})
        m.fs.sep._get_property_package()
        m.fs.sep._get_indexing_sets()
        m.outlet_list = m.fs.sep.create_outlet_list()
        m.outlet_blocks = m.fs.sep.add_outlet_state_blocks(m.outlet_list)
        m.fs.sep.add_mixed_state_block()
        return m
    @pytest.mark.unit
    def test_add_split_fractions_total(self, build):
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.outlet_idx, Set)
        assert len(build.fs.sep.outlet_idx) == len(build.outlet_list)
        assert isinstance(build.fs.sep.split_fraction, Var)
        # totalFlow basis: 1 time point x 2 outlets
        assert len(build.fs.sep.split_fraction) == 2
        assert isinstance(build.fs.sep.sum_split_frac, Constraint)
        # one sum-to-1 constraint per time point
        assert len(build.fs.sep.sum_split_frac) == 1
    @pytest.mark.unit
    def test_add_split_fractions_phase(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.outlet_idx, Set)
        assert len(build.fs.sep.outlet_idx) == len(build.outlet_list)
        assert isinstance(build.fs.sep.split_fraction, Var)
        # phaseFlow basis: 1 time x 2 outlets x 2 phases
        assert len(build.fs.sep.split_fraction) == 4
        # split fractions default to an even split between the 2 outlets
        for t in build.fs.time:
            for o in build.fs.sep.outlet_idx:
                for p in build.fs.sep.config.property_package.phase_list:
                    assert build.fs.sep.split_fraction[t, o, p].value == 0.5
        assert isinstance(build.fs.sep.sum_split_frac, Constraint)
        # one sum constraint per (time, phase)
        assert len(build.fs.sep.sum_split_frac) == 2
    @pytest.mark.unit
    def test_add_split_fractions_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.outlet_idx, Set)
        assert len(build.fs.sep.outlet_idx) == len(build.outlet_list)
        assert isinstance(build.fs.sep.split_fraction, Var)
        # componentFlow basis: 1 time x 2 outlets x 2 components
        assert len(build.fs.sep.split_fraction) == 4
        for t in build.fs.time:
            for o in build.fs.sep.outlet_idx:
                for j in build.fs.sep.config.property_package.component_list:
                    assert build.fs.sep.split_fraction[t, o, j].value == 0.5
        assert isinstance(build.fs.sep.sum_split_frac, Constraint)
        # one sum constraint per (time, component)
        assert len(build.fs.sep.sum_split_frac) == 2
    @pytest.mark.unit
    def test_add_split_fractions_phase_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.outlet_idx, Set)
        assert len(build.fs.sep.outlet_idx) == len(build.outlet_list)
        assert isinstance(build.fs.sep.split_fraction, Var)
        # phaseComponentFlow basis: 1 time x 2 outlets x 2 phases x 2 components
        assert len(build.fs.sep.split_fraction) == 8
        for t in build.fs.time:
            for o in build.fs.sep.outlet_idx:
                for p in build.fs.sep.config.property_package.phase_list:
                    for j in build.fs.sep.config.property_package.component_list:
                        assert 0.5 == build.fs.sep.split_fraction[t, o, p, j].value
        assert isinstance(build.fs.sep.sum_split_frac, Constraint)
        # one sum constraint per (time, phase, component)
        assert len(build.fs.sep.sum_split_frac) == 4
    # --- Material splitting: componentPhase balances, no phase equilibrium ---
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_total_no_equil(self, build):
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        # componentPhase: 1 time x 2 outlets x 2 phases x 2 components
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert not hasattr(build.fs.sep, "phase_equilibrium_generation")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_phase_no_equil(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert not hasattr(build.fs.sep, "phase_equilibrium_generation")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_component_no_equil(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert not hasattr(build.fs.sep, "phase_equilibrium_generation")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_phase_component_no_equil(
        self, build
    ):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert not hasattr(build.fs.sep, "phase_equilibrium_generation")
    # --- Material splitting: componentPhase balances, with phase equilibrium ---
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_total_equil(self, build):
        build.fs.sep.config.has_phase_equilibrium = True
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        # phase equilibrium adds a generation term Var to the balances
        assert isinstance(build.fs.sep.phase_equilibrium_generation, Var)
        assert len(build.fs.sep.phase_equilibrium_generation) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_phase_equil(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.config.has_phase_equilibrium = True
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert isinstance(build.fs.sep.phase_equilibrium_generation, Var)
        assert len(build.fs.sep.phase_equilibrium_generation) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_component_equil(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.config.has_phase_equilibrium = True
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert isinstance(build.fs.sep.phase_equilibrium_generation, Var)
        assert len(build.fs.sep.phase_equilibrium_generation) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_pc_phase_component_equil(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.config.has_phase_equilibrium = True
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 8
        assert isinstance(build.fs.sep.phase_equilibrium_generation, Var)
        assert len(build.fs.sep.phase_equilibrium_generation) == 4
    # --- Material splitting: componentTotal balances ---
    @pytest.mark.unit
    def test_add_material_splitting_constraints_tc_total(self, build):
        build.fs.sep.config.material_balance_type = MaterialBalanceType.componentTotal
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        # componentTotal: 1 time x 2 outlets x 2 components
        assert len(build.fs.sep.material_splitting_eqn) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_tc_phase(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.componentTotal
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_tc_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.componentTotal
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 4
    @pytest.mark.unit
    def test_add_material_splitting_constraints_tc_phase_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.componentTotal
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 4
    # --- Material splitting: total balances ---
    @pytest.mark.unit
    def test_add_material_splitting_constraints_t_total(self, build):
        build.fs.sep.config.material_balance_type = MaterialBalanceType.total
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        # total: 1 time x 2 outlets
        assert len(build.fs.sep.material_splitting_eqn) == 2
    @pytest.mark.unit
    def test_add_material_splitting_constraints_t_phase(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.total
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 2
    @pytest.mark.unit
    def test_add_material_splitting_constraints_t_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.total
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 2
    @pytest.mark.unit
    def test_add_material_splitting_constraints_t_phase_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.total
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.material_splitting_eqn, Constraint)
        assert len(build.fs.sep.material_splitting_eqn) == 2
    # --- Material splitting: unsupported / disabled balance types ---
    @pytest.mark.unit
    def test_add_material_splitting_constraints_te_total(self, build):
        # elementTotal balances are not supported by the Separator
        build.fs.sep.config.material_balance_type = MaterialBalanceType.elementTotal
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        with pytest.raises(ConfigurationError):
            build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
    @pytest.mark.unit
    def test_add_material_splitting_constraints_none_total(self, build):
        # MaterialBalanceType.none: no constraints should be created
        build.fs.sep.config.material_balance_type = MaterialBalanceType.none
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert not hasattr(build.fs.sep, "material_splitting_eqn")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_none_phase(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.none
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert not hasattr(build.fs.sep, "material_splitting_eqn")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_none_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.componentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.none
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert not hasattr(build.fs.sep, "material_splitting_eqn")
    @pytest.mark.unit
    def test_add_material_splitting_constraints_none_phase_component(self, build):
        build.fs.sep.config.split_basis = SplittingType.phaseComponentFlow
        build.fs.sep.config.material_balance_type = MaterialBalanceType.none
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_material_splitting_constraints(build.fs.sep.mixed_state)
        assert not hasattr(build.fs.sep, "material_splitting_eqn")
    # --- Energy, momentum and port construction ---
    @pytest.mark.unit
    def test_add_energy_splitting_constraints(self, build):
        # equal_temperature is the default energy split basis
        assert (
            build.fs.sep.config.energy_split_basis
            == EnergySplittingType.equal_temperature
        )
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_energy_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.temperature_equality_eqn, Constraint)
        # one equality per (time, outlet)
        assert len(build.fs.sep.temperature_equality_eqn) == 2
    @pytest.mark.unit
    def test_add_energy_splitting_constraints_enthalpy(self, build):
        build.fs.sep.config.energy_split_basis = (
            EnergySplittingType.equal_molar_enthalpy
        )
        assert (
            build.fs.sep.config.energy_split_basis
            == EnergySplittingType.equal_molar_enthalpy
        )
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_energy_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.molar_enthalpy_equality_eqn, Constraint)
        assert len(build.fs.sep.molar_enthalpy_equality_eqn) == 2
    @pytest.mark.unit
    def test_add_momentum_splitting_constraints(self, build):
        build.fs.sep.add_split_fractions(build.outlet_list, build.fs.sep.mixed_state)
        build.fs.sep.add_momentum_splitting_constraints(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.pressure_equality_eqn, Constraint)
        assert len(build.fs.sep.pressure_equality_eqn) == 2
    @pytest.mark.unit
    def test_add_inlet_port_objects(self, build):
        build.fs.sep.add_inlet_port_objects(build.fs.sep.mixed_state)
        assert isinstance(build.fs.sep.inlet, Port)
    @pytest.mark.unit
    def test_add_inlet_port_objects_construct_ports_False(self, build):
        # construct_ports=False must suppress Port creation entirely
        build.fs.sep.config.construct_ports = False
        build.fs.sep.add_inlet_port_objects(build.fs.sep.mixed_state)
        assert hasattr(build.fs.sep, "inlet") is False
    @pytest.mark.unit
    def test_add_outlet_port_objects(self, build):
        build.fs.sep.add_outlet_port_objects(build.outlet_list, build.outlet_blocks)
        assert isinstance(build.fs.sep.outlet_1, Port)
        assert isinstance(build.fs.sep.outlet_2, Port)
    @pytest.mark.unit
    def test_add_outlet_port_objects_construct_ports_False(self, build):
        build.fs.sep.config.construct_ports = False
        build.fs.sep.add_outlet_port_objects(build.outlet_list, build.outlet_blocks)
        assert hasattr(build.fs.sep, "outlet_1") is False
        assert hasattr(build.fs.sep, "outlet_2") is False
# -----------------------------------------------------------------------------
class TestSaponification(object):
    """Integration tests of the Separator with the saponification property
    package: build, initialize, solve and check conservation for a
    total-flow split into three outlets ("a", "B", "c")."""
    @pytest.fixture(scope="class")
    def sapon(self):
        """Build and fully specify a 3-outlet total-flow Separator model.

        Split fractions for "a" (0.3) and "B" (0.5) are fixed; the fraction
        for "c" (0.2) is determined by the sum-to-one constraint.
        """
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.properties = SaponificationParameterBlock()
        m.fs.unit = Separator(
            default={
                "property_package": m.fs.properties,
                "material_balance_type": MaterialBalanceType.componentPhase,
                "split_basis": SplittingType.totalFlow,
                "outlet_list": ["a", "B", "c"],
                "ideal_separation": False,
                "has_phase_equilibrium": False,
            }
        )
        m.fs.unit.inlet.flow_vol.fix(1)
        m.fs.unit.inlet.conc_mol_comp[0, "H2O"].fix(55388.0)
        m.fs.unit.inlet.conc_mol_comp[0, "NaOH"].fix(100.0)
        m.fs.unit.inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0)
        m.fs.unit.inlet.conc_mol_comp[0, "SodiumAcetate"].fix(0.0)
        m.fs.unit.inlet.conc_mol_comp[0, "Ethanol"].fix(0.0)
        m.fs.unit.inlet.temperature.fix(303.15)
        m.fs.unit.inlet.pressure.fix(101325.0)
        m.fs.unit.split_fraction[0, "a"].fix(0.3)
        m.fs.unit.split_fraction[0, "B"].fix(0.5)
        return m
    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, sapon):
        # inlet port plus one port per user-named outlet, each exposing the
        # 4 saponification state variables
        assert hasattr(sapon.fs.unit, "inlet")
        assert len(sapon.fs.unit.inlet.vars) == 4
        assert hasattr(sapon.fs.unit.inlet, "flow_vol")
        assert hasattr(sapon.fs.unit.inlet, "conc_mol_comp")
        assert hasattr(sapon.fs.unit.inlet, "temperature")
        assert hasattr(sapon.fs.unit.inlet, "pressure")
        assert hasattr(sapon.fs.unit, "a")
        assert len(sapon.fs.unit.a.vars) == 4
        assert hasattr(sapon.fs.unit.a, "flow_vol")
        assert hasattr(sapon.fs.unit.a, "conc_mol_comp")
        assert hasattr(sapon.fs.unit.a, "temperature")
        assert hasattr(sapon.fs.unit.a, "pressure")
        assert hasattr(sapon.fs.unit, "B")
        assert len(sapon.fs.unit.B.vars) == 4
        assert hasattr(sapon.fs.unit.B, "flow_vol")
        assert hasattr(sapon.fs.unit.B, "conc_mol_comp")
        assert hasattr(sapon.fs.unit.B, "temperature")
        assert hasattr(sapon.fs.unit.B, "pressure")
        assert hasattr(sapon.fs.unit, "c")
        assert len(sapon.fs.unit.c.vars) == 4
        assert hasattr(sapon.fs.unit.c, "flow_vol")
        assert hasattr(sapon.fs.unit.c, "conc_mol_comp")
        assert hasattr(sapon.fs.unit.c, "temperature")
        assert hasattr(sapon.fs.unit.c, "pressure")
        assert isinstance(sapon.fs.unit.split_fraction, Var)
        assert number_variables(sapon) == 35
        assert number_total_constraints(sapon) == 25
        assert number_unused_variables(sapon) == 0
    @pytest.mark.component
    def test_units(self, sapon):
        assert_units_consistent(sapon)
    @pytest.mark.unit
    def test_dof(self, sapon):
        assert degrees_of_freedom(sapon) == 0
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_initialize(self, sapon):
        initialization_tester(sapon)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, sapon):
        results = solver.solve(sapon)
        # Check for optimal solution
        assert check_optimal_termination(results)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, sapon):
        # Total-flow splitting: flows scale by split fraction, intensive
        # states (T, P, concentrations) pass through unchanged.
        assert pytest.approx(0.3, abs=1e-5) == value(sapon.fs.unit.a.flow_vol[0])
        assert pytest.approx(101325.0, abs=1e-2) == value(sapon.fs.unit.a.pressure[0])
        assert pytest.approx(303.15, abs=1e-2) == value(sapon.fs.unit.a.temperature[0])
        assert pytest.approx(55388, abs=1e0) == value(
            sapon.fs.unit.a.conc_mol_comp[0, "H2O"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.a.conc_mol_comp[0, "NaOH"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.a.conc_mol_comp[0, "EthylAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.a.conc_mol_comp[0, "SodiumAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.a.conc_mol_comp[0, "Ethanol"]
        )
        assert pytest.approx(0.5, abs=1e-5) == value(sapon.fs.unit.B.flow_vol[0])
        assert pytest.approx(101325.0, abs=1e-2) == value(sapon.fs.unit.B.pressure[0])
        assert pytest.approx(303.15, abs=1e-2) == value(sapon.fs.unit.B.temperature[0])
        assert pytest.approx(55388, abs=1e0) == value(
            sapon.fs.unit.B.conc_mol_comp[0, "H2O"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.B.conc_mol_comp[0, "NaOH"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.B.conc_mol_comp[0, "EthylAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.B.conc_mol_comp[0, "SodiumAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.B.conc_mol_comp[0, "Ethanol"]
        )
        # outlet "c" takes the remaining 0.2 of the flow
        assert pytest.approx(0.2, abs=1e-5) == value(sapon.fs.unit.c.flow_vol[0])
        assert pytest.approx(101325.0, abs=1e-2) == value(sapon.fs.unit.c.pressure[0])
        assert pytest.approx(303.15, abs=1e-2) == value(sapon.fs.unit.c.temperature[0])
        assert pytest.approx(55388, abs=1e0) == value(
            sapon.fs.unit.c.conc_mol_comp[0, "H2O"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.c.conc_mol_comp[0, "NaOH"]
        )
        assert pytest.approx(100.0, abs=1e-3) == value(
            sapon.fs.unit.c.conc_mol_comp[0, "EthylAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.c.conc_mol_comp[0, "SodiumAcetate"]
        )
        assert pytest.approx(0.0, abs=1e-3) == value(
            sapon.fs.unit.c.conc_mol_comp[0, "Ethanol"]
        )
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, sapon):
        # Volume balance: inlet flow equals sum of outlet flows
        assert (
            abs(
                value(
                    sapon.fs.unit.inlet.flow_vol[0]
                    - sapon.fs.unit.a.flow_vol[0]
                    - sapon.fs.unit.B.flow_vol[0]
                    - sapon.fs.unit.c.flow_vol[0]
                )
            )
            <= 1e-6
        )
        # Material balance: total molar flow (flow_vol * total conc) conserved
        assert (
            abs(
                value(
                    sapon.fs.unit.inlet.flow_vol[0]
                    * sum(
                        sapon.fs.unit.inlet.conc_mol_comp[0, j]
                        for j in sapon.fs.properties.component_list
                    )
                    - sapon.fs.unit.a.flow_vol[0]
                    * sum(
                        sapon.fs.unit.a.conc_mol_comp[0, j]
                        for j in sapon.fs.properties.component_list
                    )
                    - sapon.fs.unit.B.flow_vol[0]
                    * sum(
                        sapon.fs.unit.B.conc_mol_comp[0, j]
                        for j in sapon.fs.properties.component_list
                    )
                    - sapon.fs.unit.c.flow_vol[0]
                    * sum(
                        sapon.fs.unit.c.conc_mol_comp[0, j]
                        for j in sapon.fs.properties.component_list
                    )
                )
            )
            <= 1e-5
        )
        # Energy balance: sensible heat flow (F * rho * cp * dT) conserved
        assert (
            abs(
                value(
                    (
                        sapon.fs.unit.inlet.flow_vol[0]
                        * sapon.fs.properties.dens_mol
                        * sapon.fs.properties.cp_mol
                        * (
                            sapon.fs.unit.inlet.temperature[0]
                            - sapon.fs.properties.temperature_ref
                        )
                    )
                    - (
                        sapon.fs.unit.a.flow_vol[0]
                        * sapon.fs.properties.dens_mol
                        * sapon.fs.properties.cp_mol
                        * (
                            sapon.fs.unit.a.temperature[0]
                            - sapon.fs.properties.temperature_ref
                        )
                    )
                    - (
                        sapon.fs.unit.B.flow_vol[0]
                        * sapon.fs.properties.dens_mol
                        * sapon.fs.properties.cp_mol
                        * (
                            sapon.fs.unit.B.temperature[0]
                            - sapon.fs.properties.temperature_ref
                        )
                    )
                    - (
                        sapon.fs.unit.c.flow_vol[0]
                        * sapon.fs.properties.dens_mol
                        * sapon.fs.properties.cp_mol
                        * (
                            sapon.fs.unit.c.temperature[0]
                            - sapon.fs.properties.temperature_ref
                        )
                    )
                )
            )
            <= 1e-3
        )
    @pytest.mark.ui
    @pytest.mark.unit
    def test_report(self, sapon):
        # Smoke test: report() should run without raising
        sapon.fs.unit.report()
# -----------------------------------------------------------------------------
class TestBTXIdeal(object):
    """Integration tests of the Separator with the ideal benzene/toluene
    property package using phase-flow splitting with phase equilibrium."""
    @pytest.fixture(scope="class")
    def btx(self):
        """Build a two-phase BTX Separator with phase-based split fractions.

        Outlet 1 takes 80% of the vapor; outlet 2 takes 80% of the liquid.
        The complementary fractions are set by the sum-to-one constraints.
        """
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.properties = BTXParameterBlock(
            default={"valid_phase": ("Liq", "Vap"), "activity_coeff_model": "Ideal"}
        )
        m.fs.unit = Separator(
            default={
                "property_package": m.fs.properties,
                "material_balance_type": MaterialBalanceType.componentPhase,
                "split_basis": SplittingType.phaseFlow,
                "ideal_separation": False,
                "has_phase_equilibrium": True,
            }
        )
        m.fs.unit.inlet.flow_mol[0].fix(1)  # mol/s
        m.fs.unit.inlet.temperature[0].fix(368)  # K
        m.fs.unit.inlet.pressure[0].fix(101325)  # Pa
        m.fs.unit.inlet.mole_frac_comp[0, "benzene"].fix(0.5)
        m.fs.unit.inlet.mole_frac_comp[0, "toluene"].fix(0.5)
        m.fs.unit.split_fraction[0, "outlet_1", "Vap"].fix(0.8)
        m.fs.unit.split_fraction[0, "outlet_2", "Liq"].fix(0.8)
        return m
    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, btx):
        # inlet plus 2 default-named outlets, each with 4 state variables
        assert hasattr(btx.fs.unit, "inlet")
        assert len(btx.fs.unit.inlet.vars) == 4
        assert hasattr(btx.fs.unit.inlet, "flow_mol")
        assert hasattr(btx.fs.unit.inlet, "mole_frac_comp")
        assert hasattr(btx.fs.unit.inlet, "temperature")
        assert hasattr(btx.fs.unit.inlet, "pressure")
        assert hasattr(btx.fs.unit, "outlet_1")
        assert len(btx.fs.unit.outlet_1.vars) == 4
        assert hasattr(btx.fs.unit.outlet_1, "flow_mol")
        assert hasattr(btx.fs.unit.outlet_1, "mole_frac_comp")
        assert hasattr(btx.fs.unit.outlet_1, "temperature")
        assert hasattr(btx.fs.unit.outlet_1, "pressure")
        assert hasattr(btx.fs.unit, "outlet_2")
        assert len(btx.fs.unit.outlet_2.vars) == 4
        assert hasattr(btx.fs.unit.outlet_2, "flow_mol")
        assert hasattr(btx.fs.unit.outlet_2, "mole_frac_comp")
        assert hasattr(btx.fs.unit.outlet_2, "temperature")
        assert hasattr(btx.fs.unit.outlet_2, "pressure")
        assert isinstance(btx.fs.unit.split_fraction, Var)
        assert number_variables(btx) == 59
        assert number_total_constraints(btx) == 52
        assert number_unused_variables(btx) == 0
    @pytest.mark.component
    def test_units(self, btx):
        assert_units_consistent(btx)
    @pytest.mark.unit
    def test_dof(self, btx):
        assert degrees_of_freedom(btx) == 0
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    # NOTE(review): method name has a typo ("initialiszation"); renaming it
    # would change the collected test ID, so it is left as-is.
    def test_initialiszation(self, btx):
        btx.fs.unit.initialize()
        # Mixed-state values after initialization: flash of the equimolar
        # feed at 368 K / 101325 Pa
        assert pytest.approx(1, abs=1e-4) == value(btx.fs.unit.mixed_state[0].flow_mol)
        assert pytest.approx(0.604, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].flow_mol_phase["Liq"]
        )
        assert pytest.approx(0.396, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].flow_mol_phase["Vap"]
        )
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.mixed_state[0].temperature
        )
        assert pytest.approx(101325, abs=1e3) == value(
            btx.fs.unit.mixed_state[0].pressure
        )
        assert pytest.approx(0.412, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Liq", "benzene"]
        )
        assert pytest.approx(0.588, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Liq", "toluene"]
        )
        assert pytest.approx(0.634, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Vap", "benzene"]
        )
        assert pytest.approx(0.366, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Vap", "toluene"]
        )
        # Also trigger build of phase enthalpy vars (needed later by
        # test_conservation's energy balance).
        btx.fs.unit.mixed_state[0].enth_mol_phase["Vap"] = 0.5
        btx.fs.unit.outlet_1_state[0].enth_mol_phase["Vap"] = 0.5
        btx.fs.unit.outlet_2_state[0].enth_mol_phase["Vap"] = 0.5
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, btx):
        results = solver.solve(btx)
        # Check for optimal solution
        assert check_optimal_termination(results)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, btx):
        # Outlet 1 is vapor-rich (80% of Vap), outlet 2 is liquid-rich
        assert pytest.approx(0.438, abs=1e-3) == value(btx.fs.unit.outlet_1.flow_mol[0])
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.outlet_1.temperature[0]
        )
        assert pytest.approx(101325, abs=1e3) == value(btx.fs.unit.outlet_1.pressure[0])
        assert pytest.approx(0.573, abs=1e-3) == value(
            btx.fs.unit.outlet_1.mole_frac_comp[0, "benzene"]
        )
        assert pytest.approx(0.427, abs=1e-3) == value(
            btx.fs.unit.outlet_1.mole_frac_comp[0, "toluene"]
        )
        assert pytest.approx(0.562, abs=1e-3) == value(btx.fs.unit.outlet_2.flow_mol[0])
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.outlet_2.temperature[0]
        )
        assert pytest.approx(101325, abs=1e3) == value(btx.fs.unit.outlet_2.pressure[0])
        assert pytest.approx(0.443, abs=1e-3) == value(
            btx.fs.unit.outlet_2.mole_frac_comp[0, "benzene"]
        )
        assert pytest.approx(0.557, abs=1e-3) == value(
            btx.fs.unit.outlet_2.mole_frac_comp[0, "toluene"]
        )
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, btx):
        # Total mole balance
        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                )
            )
            <= 1e-5
        )
        # Component mole balance: benzene
        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    * btx.fs.unit.inlet.mole_frac_comp[0, "benzene"]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    * btx.fs.unit.outlet_1.mole_frac_comp[0, "benzene"]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                    * btx.fs.unit.outlet_2.mole_frac_comp[0, "benzene"]
                )
            )
            <= 1e-5
        )
        # Component mole balance: toluene
        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    * btx.fs.unit.inlet.mole_frac_comp[0, "toluene"]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    * btx.fs.unit.outlet_1.mole_frac_comp[0, "toluene"]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                    * btx.fs.unit.outlet_2.mole_frac_comp[0, "toluene"]
                )
            )
            <= 1e-5
        )
        # Energy balance: sum of phase enthalpy flows conserved
        assert (
            abs(
                value(
                    btx.fs.unit.mixed_state[0].flow_mol_phase["Vap"]
                    * btx.fs.unit.mixed_state[0].enth_mol_phase["Vap"]
                    + btx.fs.unit.mixed_state[0].flow_mol_phase["Liq"]
                    * btx.fs.unit.mixed_state[0].enth_mol_phase["Liq"]
                    - btx.fs.unit.outlet_1_state[0].flow_mol_phase["Vap"]
                    * btx.fs.unit.outlet_1_state[0].enth_mol_phase["Vap"]
                    - btx.fs.unit.outlet_1_state[0].flow_mol_phase["Liq"]
                    * btx.fs.unit.outlet_1_state[0].enth_mol_phase["Liq"]
                    - btx.fs.unit.outlet_2_state[0].flow_mol_phase["Vap"]
                    * btx.fs.unit.outlet_2_state[0].enth_mol_phase["Vap"]
                    - btx.fs.unit.outlet_2_state[0].flow_mol_phase["Liq"]
                    * btx.fs.unit.outlet_2_state[0].enth_mol_phase["Liq"]
                )
            )
            <= 1e-1
        )
    @pytest.mark.ui
    @pytest.mark.unit
    def test_report(self, btx):
        # Smoke test: report() should run without raising
        btx.fs.unit.report()
# -----------------------------------------------------------------------------
@pytest.mark.iapws
@pytest.mark.skipif(not iapws95.iapws95_available(), reason="IAPWS not available")
class TestIAPWS(object):
    """Integration tests of the Separator with the IAPWS-95 steam property
    package: component-flow splitting of a single-component (H2O) stream
    into three outlets."""
    @pytest.fixture(scope="class")
    def iapws(self):
        """Build a 3-outlet component-flow Separator on IAPWS-95 water.

        Fractions for outlet_1 (0.4) and outlet_2 (0.5) are fixed; outlet_3
        receives the remaining 0.1 via the sum-to-one constraint.
        """
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.properties = iapws95.Iapws95ParameterBlock()
        m.fs.unit = Separator(
            default={
                "property_package": m.fs.properties,
                "material_balance_type": MaterialBalanceType.componentPhase,
                "split_basis": SplittingType.componentFlow,
                "num_outlets": 3,
                "ideal_separation": False,
                "has_phase_equilibrium": False,
            }
        )
        m.fs.unit.inlet.flow_mol[0].fix(100)
        m.fs.unit.inlet.enth_mol[0].fix(4000)
        m.fs.unit.inlet.pressure[0].fix(101325)
        m.fs.unit.split_fraction[0, "outlet_1", "H2O"].fix(0.4)
        m.fs.unit.split_fraction[0, "outlet_2", "H2O"].fix(0.5)
        return m
    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, iapws):
        # IAPWS state: 3 variables (flow_mol, enth_mol, pressure) per port
        assert len(iapws.fs.unit.inlet.vars) == 3
        assert hasattr(iapws.fs.unit.inlet, "flow_mol")
        assert hasattr(iapws.fs.unit.inlet, "enth_mol")
        assert hasattr(iapws.fs.unit.inlet, "pressure")
        assert hasattr(iapws.fs.unit, "outlet_1")
        assert len(iapws.fs.unit.outlet_1.vars) == 3
        assert hasattr(iapws.fs.unit.outlet_1, "flow_mol")
        assert hasattr(iapws.fs.unit.outlet_1, "enth_mol")
        assert hasattr(iapws.fs.unit.outlet_1, "pressure")
        assert hasattr(iapws.fs.unit, "outlet_2")
        assert len(iapws.fs.unit.outlet_2.vars) == 3
        assert hasattr(iapws.fs.unit.outlet_2, "flow_mol")
        assert hasattr(iapws.fs.unit.outlet_2, "enth_mol")
        assert hasattr(iapws.fs.unit.outlet_2, "pressure")
        assert hasattr(iapws.fs.unit, "outlet_3")
        assert len(iapws.fs.unit.outlet_3.vars) == 3
        assert hasattr(iapws.fs.unit.outlet_3, "flow_mol")
        assert hasattr(iapws.fs.unit.outlet_3, "enth_mol")
        assert hasattr(iapws.fs.unit.outlet_3, "pressure")
        assert isinstance(iapws.fs.unit.split_fraction, Var)
        assert number_variables(iapws) == 15
        assert number_total_constraints(iapws) == 10
        assert number_unused_variables(iapws) == 0
    @pytest.mark.component
    def test_units(self, iapws):
        assert_units_consistent(iapws)
    @pytest.mark.unit
    def test_dof(self, iapws):
        assert degrees_of_freedom(iapws) == 0
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    # NOTE(review): method name has a typo ("initialiszation"); renaming it
    # would change the collected test ID, so it is left as-is.
    def test_initialiszation(self, iapws):
        iapws.fs.unit.initialize()
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, iapws):
        results = solver.solve(iapws)
        # Check for optimal solution
        assert check_optimal_termination(results)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, iapws):
        # Flows split 40/50/10; intensive states pass through unchanged
        assert pytest.approx(40, abs=1e-3) == value(iapws.fs.unit.outlet_1.flow_mol[0])
        assert pytest.approx(50, abs=1e-3) == value(iapws.fs.unit.outlet_2.flow_mol[0])
        assert pytest.approx(10, abs=1e-3) == value(iapws.fs.unit.outlet_3.flow_mol[0])
        assert pytest.approx(4000, abs=1e0) == value(iapws.fs.unit.outlet_1.enth_mol[0])
        assert pytest.approx(4000, abs=1e0) == value(iapws.fs.unit.outlet_2.enth_mol[0])
        assert pytest.approx(4000, abs=1e0) == value(iapws.fs.unit.outlet_3.enth_mol[0])
        assert pytest.approx(101325, abs=1e2) == value(
            iapws.fs.unit.outlet_1.pressure[0]
        )
        assert pytest.approx(101325, abs=1e2) == value(
            iapws.fs.unit.outlet_2.pressure[0]
        )
        assert pytest.approx(101325, abs=1e2) == value(
            iapws.fs.unit.outlet_3.pressure[0]
        )
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, iapws):
        # Total mole balance
        assert (
            abs(
                value(
                    iapws.fs.unit.inlet.flow_mol[0]
                    - iapws.fs.unit.outlet_1.flow_mol[0]
                    - iapws.fs.unit.outlet_2.flow_mol[0]
                    - iapws.fs.unit.outlet_3.flow_mol[0]
                )
            )
            <= 1e-6
        )
        # Energy balance: molar enthalpy flow conserved
        assert (
            abs(
                value(
                    iapws.fs.unit.inlet.flow_mol[0] * iapws.fs.unit.inlet.enth_mol[0]
                    - iapws.fs.unit.outlet_1.flow_mol[0]
                    * iapws.fs.unit.outlet_1.enth_mol[0]
                    - iapws.fs.unit.outlet_2.flow_mol[0]
                    * iapws.fs.unit.outlet_2.enth_mol[0]
                    - iapws.fs.unit.outlet_3.flow_mol[0]
                    * iapws.fs.unit.outlet_3.enth_mol[0]
                )
            )
            <= 1e-2
        )
    @pytest.mark.ui
    @pytest.mark.unit
    def test_report(self, iapws):
        # Smoke test: report() should run without raising
        iapws.fs.unit.report()
# -----------------------------------------------------------------------------
# Define some generic Property Block classes for testing ideal separations
@declare_process_block_class("IdealTestBlock")
class _IdealParameterBlock(PhysicalParameterBlock):
    """Minimal 2-phase, 2-component parameter block used to exercise the
    Separator's ideal separation code paths."""
    def build(self):
        """Declare phases p1/p2, components c1/c2, the full phase-component
        set, and register IdealStateBlock as the associated state block."""
        super(_IdealParameterBlock, self).build()
        self.p1 = Phase()
        self.p2 = Phase()
        self.c1 = Component()
        self.c2 = Component()
        # All four phase-component pairs are valid
        self._phase_component_set = Set(
            initialize=[("p1", "c1"), ("p1", "c2"), ("p2", "c1"), ("p2", "c2")]
        )
        self._state_block_class = IdealStateBlock
    @classmethod
    def define_metadata(cls, obj):
        """Register base units for the test package."""
        obj.add_default_units(
            {
                "time": pyunits.s,
                "length": pyunits.m,
                "mass": pyunits.g,
                "amount": pyunits.mol,
                "temperature": pyunits.K,
            }
        )
@declare_process_block_class("IdealStateBlock", block_class=StateBlock)
class IdealTestBlockData(StateBlockData):
    """State block whose reported state variables can be switched by tests."""

    CONFIG = ConfigBlock(implicit=True)

    def build(self):
        super().build()

        # Add an attribute to allow us to change the state variable definition
        self._state_var_switch = 1

        phases = self.params.phase_list
        comps = self.params.component_list

        self.flow_mol_phase_comp = Var(phases, comps, initialize=2)
        self.flow_mol_phase = Var(phases, initialize=2)
        self.flow_mol_comp = Var(comps, initialize=2)
        self.flow_mol = Var(initialize=2)
        self.pressure = Var(initialize=1e5)
        self.temperature = Var(initialize=300)
        self.mole_frac_comp = Var(comps, initialize=0.5)
        self.mole_frac_phase_comp = Var(phases, comps, initialize=0.5)
        self.test_var = Var(initialize=1)
        self.test_var_comp = Var(comps, initialize=1)
        self.test_var_phase = Var(phases, initialize=1)
        self.test_var_phase_comp = Var(phases, comps, initialize=1)

        # Set distinct values so tests can confirm partitioning is correct
        for key, val in {
            ("p1", "c1"): 1,
            ("p1", "c2"): 2,
            ("p2", "c1"): 3,
            ("p2", "c2"): 4,
        }.items():
            self.flow_mol_phase_comp[key] = val
        self.flow_mol_phase["p1"] = 5
        self.flow_mol_phase["p2"] = 6
        self.flow_mol_comp["c1"] = 7
        self.flow_mol_comp["c2"] = 8
        self.flow_mol = 9
        for key, val in {
            ("p1", "c1"): 0.9,
            ("p1", "c2"): 0.7,
            ("p2", "c1"): 0.5,
            ("p2", "c2"): 0.3,
        }.items():
            self.mole_frac_phase_comp[key] = val
        self.test_var_comp["c1"] = 2000
        self.test_var_comp["c2"] = 3000
        self.test_var_phase["p1"] = 4000
        self.test_var_phase["p2"] = 5000
        for key, val in {
            ("p1", "c1"): 6000,
            ("p1", "c2"): 7000,
            ("p2", "c1"): 8000,
            ("p2", "c2"): 9000,
        }.items():
            self.test_var_phase_comp[key] = val

    def define_state_vars(self):
        """Return the state-variable dict selected by ``_state_var_switch``."""
        # Lambdas defer attribute access until the chosen option is built
        options = {
            1: lambda: {"mole_frac_comp": self.mole_frac_comp},
            2: lambda: {"mole_frac_phase_comp": self.mole_frac_phase_comp},
            3: lambda: {"flow_mol_phase_comp": self.flow_mol_phase_comp},
            4: lambda: {"flow_mol_phase": self.flow_mol_phase},
            5: lambda: {"flow_mol_comp": self.flow_mol_comp},
            6: lambda: {"temperature": self.temperature, "pressure": self.pressure},
            7: lambda: {"test_var": self.test_var},
        }
        builder = options.get(self._state_var_switch)
        return builder() if builder is not None else None
# -----------------------------------------------------------------------------
# Tests of Separator unit model ideal construction methods
@pytest.mark.build
class TestIdealConstruction(object):
@pytest.mark.unit
def test_phase_component(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert isinstance(m.fs.sep.outlet_1, Port)
assert isinstance(m.fs.sep.outlet_2, Port)
assert isinstance(m.fs.sep.outlet_3, Port)
assert isinstance(m.fs.sep.outlet_4, Port)
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c1"]) == 2.0
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.temperature[0]) == 300
assert value(m.fs.sep.outlet_1.pressure[0]) == 1e5
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c2"]) == 2.0
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.temperature[0]) == 300
assert value(m.fs.sep.outlet_2.pressure[0]) == 1e5
assert value(m.fs.sep.outlet_3.component_flow_phase[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_3.component_flow_phase[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_3.component_flow_phase[0, "p2", "c1"]) == 2.0
assert value(m.fs.sep.outlet_3.component_flow_phase[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_3.temperature[0]) == 300
assert value(m.fs.sep.outlet_3.pressure[0]) == 1e5
assert value(m.fs.sep.outlet_4.component_flow_phase[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.component_flow_phase[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.component_flow_phase[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.component_flow_phase[0, "p2", "c2"]) == 2.0
assert value(m.fs.sep.outlet_4.temperature[0]) == 300
assert value(m.fs.sep.outlet_4.pressure[0]) == 1e5
@pytest.mark.unit
def test_phase(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {("p1"): "outlet_1", ("p2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert isinstance(m.fs.sep.outlet_1, Port)
assert isinstance(m.fs.sep.outlet_2, Port)
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c1"]) == 2.0
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c2"]) == 2.0
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.temperature[0]) == 300
assert value(m.fs.sep.outlet_1.pressure[0]) == 1e5
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c1"]) == 2.0
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c2"]) == 2.0
assert value(m.fs.sep.outlet_2.temperature[0]) == 300
assert value(m.fs.sep.outlet_2.pressure[0]) == 1e5
@pytest.mark.unit
def test_component(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert isinstance(m.fs.sep.outlet_1, Port)
assert isinstance(m.fs.sep.outlet_2, Port)
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c1"]) == 2.0
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c1"]) == 2.0
assert value(m.fs.sep.outlet_1.component_flow_phase[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.temperature[0]) == 300
assert value(m.fs.sep.outlet_1.pressure[0]) == 1e5
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p1", "c2"]) == 2.0
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.component_flow_phase[0, "p2", "c2"]) == 2.0
assert value(m.fs.sep.outlet_2.temperature[0]) == 300
assert value(m.fs.sep.outlet_2.pressure[0]) == 1e5
@pytest.mark.unit
def test_ideal_w_no_ports(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
"construct_ports": False,
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_ideal_w_total_flow(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.totalFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_ideal_w_no_split_map(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.totalFlow,
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_phase_component_mismatch(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {("p1", "c1"): "outlet_1", ("p1", "c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_component_mismatch(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_phase_mismatch(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_split_map_mismatch(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = PhysicalParameterTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 1,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
with pytest.raises(ConfigurationError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_mole_frac_w_component_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c2"]) == 1
@pytest.mark.unit
def test_mole_frac_w_phase_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c1"]) == 0.9
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c2"]) == 0.7
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c1"]) == 0.5
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c2"]) == 0.3
@pytest.mark.unit
def test_mole_frac_w_phase_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_comp[0, "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_comp[0, "c2"]) == 1
assert value(m.fs.sep.outlet_3.mole_frac_comp[0, "c1"]) == 1
assert value(m.fs.sep.outlet_3.mole_frac_comp[0, "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.mole_frac_comp[0, "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.mole_frac_comp[0, "c2"]) == 1
@pytest.mark.unit
def test_mole_frac_w_phase_split_no_fallback(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
# Delete mole_frac_phase_comp so that the fallback should fail
m.fs.sep.mixed_state[0].del_component(
m.fs.sep.mixed_state[0].mole_frac_phase_comp
)
with pytest.raises(AttributeError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_mole_frac_phase_w_component_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 2
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c2"]) == 1
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c2"]) == 1
@pytest.mark.unit
def test_mole_frac_phase_w_phase_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 2
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c1"]) == 0.9
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c2"]) == 0.7
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c1"]) == 0.5
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c2"]) == 0.3
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c1"]) == 0.9
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c2"]) == 0.7
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c1"]) == 0.5
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c2"]) == 0.3
@pytest.mark.unit
def test_mole_frac_phase_w_phase_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 2
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c1"]) == 1
assert value(m.fs.sep.outlet_1.mole_frac_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p1", "c2"]) == 1
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.mole_frac_phase_comp[0, "p2", "c2"]) == 1
assert value(m.fs.sep.outlet_3.mole_frac_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_3.mole_frac_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_3.mole_frac_phase_comp[0, "p2", "c1"]) == 1
assert value(m.fs.sep.outlet_3.mole_frac_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.mole_frac_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.mole_frac_phase_comp[0, "p1", "c2"]) == 1
assert value(m.fs.sep.outlet_4.mole_frac_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.mole_frac_phase_comp[0, "p2", "c2"]) == 1
@pytest.mark.unit
def test_flow_phase_comp_w_phase_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 3
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c2"]) == 2
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_3.flow_mol_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_3.flow_mol_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_3.flow_mol_phase_comp[0, "p2", "c1"]) == 3
assert value(m.fs.sep.outlet_3.flow_mol_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_phase_comp[0, "p2", "c2"]) == 4
@pytest.mark.unit
def test_flow_phase_comp_w_phase_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {("p1"): "outlet_1", ("p2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 3
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c2"]) == 2
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c1"]) == 3
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c2"]) == 4
@pytest.mark.unit
def test_flow_phase_comp_w_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 3
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p1", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c1"]) == 3
assert value(m.fs.sep.outlet_1.flow_mol_phase_comp[0, "p2", "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p1", "c2"]) == 2
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase_comp[0, "p2", "c2"]) == 4
@pytest.mark.unit
def test_flow_phase_w_phase_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 4
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p1"]) == 2
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p2"]) == 1e-8
assert value(m.fs.sep.outlet_3.flow_mol_phase[0, "p1"]) == 1e-8
assert value(m.fs.sep.outlet_3.flow_mol_phase[0, "p2"]) == 3
assert value(m.fs.sep.outlet_4.flow_mol_phase[0, "p1"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_phase[0, "p2"]) == 4
@pytest.mark.unit
def test_flow_phase_w_phase_comp_split_no_fallback(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 4
m.fs.sep.mixed_state[0].del_component(
m.fs.sep.mixed_state[0].flow_mol_phase_comp
)
with pytest.raises(AttributeError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_flow_phase_w_phase_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {("p1"): "outlet_1", ("p2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 4
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p1"]) == 5
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p2"]) == 6
@pytest.mark.unit
def test_flow_phase_w_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 4
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_phase[0, "p2"]) == 3
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p1"]) == 2
assert value(m.fs.sep.outlet_2.flow_mol_phase[0, "p2"]) == 4
@pytest.mark.unit
def test_flow_phase_w_comp_split_no_fallback(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.componentFlow,
"ideal_split_map": {("c1"): "outlet_1", ("c2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 4
m.fs.sep.mixed_state[0].del_component(
m.fs.sep.mixed_state[0].flow_mol_phase_comp
)
with pytest.raises(AttributeError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_flow_comp_w_phase_comp_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 5
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_comp[0, "c1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_comp[0, "c2"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_comp[0, "c1"]) == 1e-8
assert value(m.fs.sep.outlet_2.flow_mol_comp[0, "c2"]) == 2
assert value(m.fs.sep.outlet_3.flow_mol_comp[0, "c1"]) == 3
assert value(m.fs.sep.outlet_3.flow_mol_comp[0, "c2"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_comp[0, "c1"]) == 1e-8
assert value(m.fs.sep.outlet_4.flow_mol_comp[0, "c2"]) == 4
@pytest.mark.unit
def test_flow_comp_w_phase_comp_split_no_fallback(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 4,
"ideal_separation": True,
"split_basis": SplittingType.phaseComponentFlow,
"ideal_split_map": {
("p1", "c1"): "outlet_1",
("p1", "c2"): "outlet_2",
("p2", "c1"): "outlet_3",
("p2", "c2"): "outlet_4",
},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 5
m.fs.sep.mixed_state[0].del_component(
m.fs.sep.mixed_state[0].flow_mol_phase_comp
)
with pytest.raises(AttributeError):
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
@pytest.mark.unit
def test_flow_comp_w_phase_split(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.pp = IdealTestBlock()
m.fs.sep = SeparatorFrame(
default={
"property_package": m.fs.pp,
"num_outlets": 2,
"ideal_separation": True,
"split_basis": SplittingType.phaseFlow,
"ideal_split_map": {("p1"): "outlet_1", ("p2"): "outlet_2"},
}
)
m.fs.sep._get_property_package()
m.fs.sep._get_indexing_sets()
m.outlet_list = m.fs.sep.create_outlet_list()
m.fs.sep.add_mixed_state_block()
m.fs.sep.mixed_state[0]._state_var_switch = 5
m.fs.sep.partition_outlet_flows(m.fs.sep.mixed_state, m.outlet_list)
assert value(m.fs.sep.outlet_1.flow_mol_comp[0, "c1"]) == 1
assert value(m.fs.sep.outlet_1.flow_mol_comp[0, "c2"]) == 2
assert value(m.fs.sep.outlet_2.flow_mol_comp[0, "c1"]) == 3
assert value(m.fs.sep.outlet_2.flow_mol_comp[0, "c2"]) == 4
@pytest.mark.unit
def test_flow_comp_w_phase_split_no_fallback(self):
    """Deleting flow_mol_phase_comp before partitioning a phase-based
    ideal split must raise AttributeError."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseFlow,
            "ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 5
    mixed.del_component(mixed.flow_mol_phase_comp)
    with pytest.raises(AttributeError):
        sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)
@pytest.mark.unit
def test_flow_comp_w_comp_split(self):
    """Component-based ideal split: each outlet gets its mapped
    component's flow and a near-zero (1e-8) flow of the other."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.componentFlow,
            "ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    sep.mixed_state[0]._state_var_switch = 5
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    for outlet, expected in (
        (sep.outlet_1, {"c1": 7, "c2": 1e-8}),
        (sep.outlet_2, {"c1": 1e-8, "c2": 8}),
    ):
        for comp, flow in expected.items():
            assert value(outlet.flow_mol_comp[0, comp]) == flow
@pytest.mark.unit
def test_t_p(self):
    """Temperature and pressure state variables are copied unchanged
    to every outlet by the ideal-split partitioning."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.componentFlow,
            "ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    sep.mixed_state[0]._state_var_switch = 6
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    for outlet in (sep.outlet_1, sep.outlet_2):
        assert value(outlet.temperature[0]) == 300
        assert value(outlet.pressure[0]) == 1e5
@pytest.mark.unit
def test_general_comp_split(self):
    """General (non-flow) variable with a component-indexed form is
    partitioned per the component split map."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.componentFlow,
            "ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    sep.mixed_state[0]._state_var_switch = 7
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    assert value(sep.outlet_1.test_var[0]) == 2000
    assert value(sep.outlet_2.test_var[0]) == 3000
@pytest.mark.unit
def test_general_comp_split_fallback(self):
    """With test_var_comp deleted, partitioning falls back to another
    indexed form of the variable and still succeeds."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.componentFlow,
            "ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 7
    mixed.del_component(mixed.test_var_comp)
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    assert value(sep.outlet_1.test_var[0]) == 14000
    assert value(sep.outlet_2.test_var[0]) == 16000
@pytest.mark.unit
def test_general_comp_split_fallback_fail(self):
    """With both test_var_comp and test_var_phase_comp deleted there is
    no fallback form, so partitioning raises AttributeError."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.componentFlow,
            "ideal_split_map": {"c1": "outlet_1", "c2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 7
    mixed.del_component(mixed.test_var_comp)
    mixed.del_component(mixed.test_var_phase_comp)
    with pytest.raises(AttributeError):
        sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)
@pytest.mark.unit
def test_general_phase_split(self):
    """General variable with a phase-indexed form is partitioned per
    the phase split map."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseFlow,
            "ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    sep.mixed_state[0]._state_var_switch = 7
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    assert value(sep.outlet_1.test_var[0]) == 4000
    assert value(sep.outlet_2.test_var[0]) == 5000
@pytest.mark.unit
def test_general_phase_split_fallback(self):
    """With test_var_phase deleted, partitioning falls back to another
    indexed form of the variable and still succeeds."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseFlow,
            "ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 7
    mixed.del_component(mixed.test_var_phase)
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    assert value(sep.outlet_1.test_var[0]) == 13000
    assert value(sep.outlet_2.test_var[0]) == 17000
@pytest.mark.unit
def test_general_phase_split_fallback_fail(self):
    """With both test_var_phase and test_var_phase_comp deleted there
    is no fallback form, so partitioning raises AttributeError."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 2,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseFlow,
            "ideal_split_map": {"p1": "outlet_1", "p2": "outlet_2"},
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 7
    mixed.del_component(mixed.test_var_phase)
    mixed.del_component(mixed.test_var_phase_comp)
    with pytest.raises(AttributeError):
        sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)
@pytest.mark.unit
def test_general_phase_comp_split(self):
    """General variable with a phase-component-indexed form is
    partitioned per the four-way phase-component split map."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 4,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseComponentFlow,
            "ideal_split_map": {
                ("p1", "c1"): "outlet_1",
                ("p1", "c2"): "outlet_2",
                ("p2", "c1"): "outlet_3",
                ("p2", "c2"): "outlet_4",
            },
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    sep.mixed_state[0]._state_var_switch = 7
    sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)

    expected = {
        sep.outlet_1: 6000,
        sep.outlet_2: 7000,
        sep.outlet_3: 8000,
        sep.outlet_4: 9000,
    }
    for outlet, val in expected.items():
        assert value(outlet.test_var[0]) == val
@pytest.mark.unit
def test_general_phase_comp_split_fallback_fail(self):
    """With test_var_phase_comp deleted there is no form to partition a
    phase-component split from, so AttributeError is raised."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = IdealTestBlock()
    model.fs.sep = SeparatorFrame(
        default={
            "property_package": model.fs.pp,
            "num_outlets": 4,
            "ideal_separation": True,
            "split_basis": SplittingType.phaseComponentFlow,
            "ideal_split_map": {
                ("p1", "c1"): "outlet_1",
                ("p1", "c2"): "outlet_2",
                ("p2", "c1"): "outlet_3",
                ("p2", "c2"): "outlet_4",
            },
        }
    )
    sep = model.fs.sep
    sep._get_property_package()
    sep._get_indexing_sets()
    model.outlet_list = sep.create_outlet_list()
    sep.add_mixed_state_block()
    mixed = sep.mixed_state[0]
    mixed._state_var_switch = 7
    mixed.del_component(mixed.test_var_phase_comp)
    with pytest.raises(AttributeError):
        sep.partition_outlet_flows(sep.mixed_state, model.outlet_list)
# -----------------------------------------------------------------------------
class TestBTX_Ideal(object):
    """Integration tests for an ideal-separation Separator on the BTX
    (benzene/toluene) property package, splitting the feed by phase:
    Vap -> outlet_1, Liq -> outlet_2.
    """

    @pytest.fixture(scope="class")
    def btx(self):
        """Build a flowsheet with an ideal phase-split Separator and a
        fully specified equimolar benzene/toluene feed."""
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.properties = BTXParameterBlock()
        m.fs.unit = Separator(
            default={
                "property_package": m.fs.properties,
                "material_balance_type": MaterialBalanceType.componentPhase,
                "split_basis": SplittingType.phaseFlow,
                "ideal_separation": True,
                "ideal_split_map": {"Vap": "outlet_1", "Liq": "outlet_2"},
                "has_phase_equilibrium": False,
            }
        )
        m.fs.unit.inlet.flow_mol[0].fix(1)  # mol/s
        m.fs.unit.inlet.temperature[0].fix(368)  # K
        m.fs.unit.inlet.pressure[0].fix(101325)  # Pa
        m.fs.unit.inlet.mole_frac_comp[0, "benzene"].fix(0.5)
        m.fs.unit.inlet.mole_frac_comp[0, "toluene"].fix(0.5)
        return m

    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, btx):
        """Inlet and both outlets expose the four FTPz state variables,
        and the model has the expected variable/constraint counts."""
        assert hasattr(btx.fs.unit, "inlet")
        assert len(btx.fs.unit.inlet.vars) == 4
        assert hasattr(btx.fs.unit.inlet, "flow_mol")
        assert hasattr(btx.fs.unit.inlet, "mole_frac_comp")
        assert hasattr(btx.fs.unit.inlet, "temperature")
        assert hasattr(btx.fs.unit.inlet, "pressure")

        assert hasattr(btx.fs.unit, "outlet_1")
        assert len(btx.fs.unit.outlet_1.vars) == 4
        assert hasattr(btx.fs.unit.outlet_1, "flow_mol")
        assert hasattr(btx.fs.unit.outlet_1, "mole_frac_comp")
        assert hasattr(btx.fs.unit.outlet_1, "temperature")
        assert hasattr(btx.fs.unit.outlet_1, "pressure")

        assert hasattr(btx.fs.unit, "outlet_2")
        assert len(btx.fs.unit.outlet_2.vars) == 4
        assert hasattr(btx.fs.unit.outlet_2, "flow_mol")
        assert hasattr(btx.fs.unit.outlet_2, "mole_frac_comp")
        assert hasattr(btx.fs.unit.outlet_2, "temperature")
        assert hasattr(btx.fs.unit.outlet_2, "pressure")

        assert number_variables(btx) == 17
        assert number_total_constraints(btx) == 12
        assert number_unused_variables(btx) == 0

    @pytest.mark.component
    def test_units(self, btx):
        """All constraints must be dimensionally consistent."""
        assert_units_consistent(btx)

    @pytest.mark.unit
    def test_dof(self, btx):
        """The fully specified model has zero degrees of freedom."""
        assert degrees_of_freedom(btx) == 0

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_initialization(self, btx):
        # Fixed typo in method name (was "test_initialiszation").
        """Initialization converges to the expected mixed-state VLE
        solution without consuming any degrees of freedom."""
        btx.fs.unit.initialize()

        assert pytest.approx(1, abs=1e-4) == value(btx.fs.unit.mixed_state[0].flow_mol)
        assert pytest.approx(0.604, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].flow_mol_phase["Liq"]
        )
        assert pytest.approx(0.396, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].flow_mol_phase["Vap"]
        )
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.mixed_state[0].temperature
        )
        assert pytest.approx(101325, abs=1e3) == value(
            btx.fs.unit.mixed_state[0].pressure
        )
        assert pytest.approx(0.412, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Liq", "benzene"]
        )
        assert pytest.approx(0.588, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Liq", "toluene"]
        )
        assert pytest.approx(0.634, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Vap", "benzene"]
        )
        assert pytest.approx(0.366, abs=1e-3) == value(
            btx.fs.unit.mixed_state[0].mole_frac_phase_comp["Vap", "toluene"]
        )

        assert degrees_of_freedom(btx) == 0

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, btx):
        """The initialized model solves to optimality."""
        results = solver.solve(btx)

        # Check for optimal solution
        assert check_optimal_termination(results)

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, btx):
        """Outlet 1 carries the vapour phase, outlet 2 the liquid phase,
        each at the inlet temperature and pressure."""
        assert pytest.approx(0.396, abs=1e-3) == value(btx.fs.unit.outlet_1.flow_mol[0])
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.outlet_1.temperature[0]
        )
        assert pytest.approx(101325, abs=1e3) == value(btx.fs.unit.outlet_1.pressure[0])
        assert pytest.approx(0.634, abs=1e-3) == value(
            btx.fs.unit.outlet_1.mole_frac_comp[0, "benzene"]
        )
        assert pytest.approx(0.366, abs=1e-3) == value(
            btx.fs.unit.outlet_1.mole_frac_comp[0, "toluene"]
        )

        assert pytest.approx(0.604, abs=1e-3) == value(btx.fs.unit.outlet_2.flow_mol[0])
        assert pytest.approx(368.0, abs=1e-1) == value(
            btx.fs.unit.outlet_2.temperature[0]
        )
        assert pytest.approx(101325, abs=1e3) == value(btx.fs.unit.outlet_2.pressure[0])
        assert pytest.approx(0.412, abs=1e-3) == value(
            btx.fs.unit.outlet_2.mole_frac_comp[0, "benzene"]
        )
        assert pytest.approx(0.588, abs=1e-3) == value(
            btx.fs.unit.outlet_2.mole_frac_comp[0, "toluene"]
        )

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, btx):
        """Total and per-component mole balances close across the unit."""
        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                )
            )
            <= 1e-5
        )

        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    * btx.fs.unit.inlet.mole_frac_comp[0, "benzene"]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    * btx.fs.unit.outlet_1.mole_frac_comp[0, "benzene"]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                    * btx.fs.unit.outlet_2.mole_frac_comp[0, "benzene"]
                )
            )
            <= 1e-5
        )

        assert (
            abs(
                value(
                    btx.fs.unit.inlet.flow_mol[0]
                    * btx.fs.unit.inlet.mole_frac_comp[0, "toluene"]
                    - btx.fs.unit.outlet_1.flow_mol[0]
                    * btx.fs.unit.outlet_1.mole_frac_comp[0, "toluene"]
                    - btx.fs.unit.outlet_2.flow_mol[0]
                    * btx.fs.unit.outlet_2.mole_frac_comp[0, "toluene"]
                )
            )
            <= 1e-5
        )

        # Assume energy conservation is covered by control volume tests

    @pytest.mark.ui
    @pytest.mark.unit
    def test_report(self, btx):
        """The report method runs without error."""
        btx.fs.unit.report()
@pytest.mark.unit
def test_initialization_error():
    """Initialization must raise InitializationError when fixed flows
    are inconsistent (outlets total 20 vs. a mixed inlet of 100)."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.pp = PhysicalParameterTestBlock()
    model.fs.sep = Separator(default={"property_package": model.fs.pp})

    sep = model.fs.sep
    sep.outlet_1_state[0].material_flow_mol.fix(10)
    sep.outlet_2_state[0].material_flow_mol.fix(10)
    sep.mixed_state[0].material_flow_mol.fix(100)
    sep.split_fraction.fix()

    with pytest.raises(InitializationError):
        sep.initialize()
| 37.989333
| 88
| 0.604512
| 15,251
| 113,968
| 4.283063
| 0.02826
| 0.064604
| 0.047672
| 0.03417
| 0.922506
| 0.91058
| 0.890005
| 0.860657
| 0.836959
| 0.818374
| 0
| 0.028494
| 0.257248
| 113,968
| 2,999
| 89
| 38.002001
| 0.743166
| 0.021252
| 0
| 0.661277
| 0
| 0
| 0.065532
| 0.003306
| 0
| 0
| 0
| 0
| 0.217872
| 1
| 0.05617
| false
| 0
| 0.006383
| 0
| 0.073617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ceb675d1f27644cd6b5f13e368fe6346c710ae7c
| 123
|
py
|
Python
|
mmtbx/geometry/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/geometry/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/geometry/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex # import dependency
| 30.75
| 64
| 0.853659
| 16
| 123
| 6.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 3
| 65
| 41
| 0.899083
| 0.138211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0c6d63d9664b9405c9b70295d7db4e8e4166e7d7
| 170
|
py
|
Python
|
poezio/core/__init__.py
|
mathiasertl/poezio
|
49b785d5be879353c6b1a5f98cfe173d3c8fff15
|
[
"Zlib"
] | null | null | null |
poezio/core/__init__.py
|
mathiasertl/poezio
|
49b785d5be879353c6b1a5f98cfe173d3c8fff15
|
[
"Zlib"
] | null | null | null |
poezio/core/__init__.py
|
mathiasertl/poezio
|
49b785d5be879353c6b1a5f98cfe173d3c8fff15
|
[
"Zlib"
] | null | null | null |
"""
Core class, split into smaller chunks
"""
__all__ = ['Core', 'Command', 'Status']
from poezio.core.core import Core
from poezio.core.structs import Command, Status
| 18.888889
| 47
| 0.723529
| 23
| 170
| 5.173913
| 0.565217
| 0.218487
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141176
| 170
| 8
| 48
| 21.25
| 0.815068
| 0.217647
| 0
| 0
| 0
| 0
| 0.137097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c76c9730980b217ebbda28b5dbebee31e377c96
| 81
|
py
|
Python
|
tikzify/foundation/__init__.py
|
NeilGirdhar/tikzify
|
5de296c118188e532788234971de387f9fe1416e
|
[
"MIT"
] | 3
|
2019-12-26T23:49:13.000Z
|
2022-03-04T23:31:19.000Z
|
tikzify/foundation/__init__.py
|
NeilGirdhar/tikzify
|
5de296c118188e532788234971de387f9fe1416e
|
[
"MIT"
] | 2
|
2019-12-09T14:42:51.000Z
|
2022-01-21T20:47:06.000Z
|
tikzify/foundation/__init__.py
|
NeilGirdhar/tikzify
|
5de296c118188e532788234971de387f9fe1416e
|
[
"MIT"
] | null | null | null |
from .contexts import *
from .formatter import *
from .pf import *
del contexts
| 13.5
| 24
| 0.740741
| 11
| 81
| 5.454545
| 0.545455
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 81
| 5
| 25
| 16.2
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cd1a99fc2b209b1c0da7211909e5c86b3ac3c31
| 175
|
py
|
Python
|
src/tzer/tir/semantic/__init__.py
|
Tzer-AnonBot/tzer
|
07799222118f757bdcb6a14654a6addda2dcf55c
|
[
"Apache-2.0"
] | 47
|
2021-12-16T19:48:49.000Z
|
2022-03-24T03:14:14.000Z
|
src/tzer/tir/semantic/__init__.py
|
Tzer-AnonBot/tzer
|
07799222118f757bdcb6a14654a6addda2dcf55c
|
[
"Apache-2.0"
] | null | null | null |
src/tzer/tir/semantic/__init__.py
|
Tzer-AnonBot/tzer
|
07799222118f757bdcb6a14654a6addda2dcf55c
|
[
"Apache-2.0"
] | 4
|
2021-10-16T20:36:58.000Z
|
2022-01-25T04:27:49.000Z
|
from .context import Context
from .constraint import PrimExprConstraint, VarConstraint, StmtConstraint, BlockConstraint, PrimFuncConstraint
from .constraint import Constraint
| 43.75
| 110
| 0.868571
| 16
| 175
| 9.5
| 0.5625
| 0.184211
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 175
| 3
| 111
| 58.333333
| 0.955975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cdd7d7dfc0970b82f9522fe131b0c5d6b915978
| 92
|
py
|
Python
|
src/kaa/easings.py
|
mmicek/kaa
|
3583edf19b0e453c7de6c316a08d9eda72a1fcfc
|
[
"MIT"
] | 17
|
2019-07-10T12:24:53.000Z
|
2022-02-19T21:39:19.000Z
|
src/kaa/easings.py
|
mmicek/kaa
|
3583edf19b0e453c7de6c316a08d9eda72a1fcfc
|
[
"MIT"
] | 29
|
2019-07-10T12:30:58.000Z
|
2021-12-30T15:33:44.000Z
|
src/kaa/easings.py
|
mmicek/kaa
|
3583edf19b0e453c7de6c316a08d9eda72a1fcfc
|
[
"MIT"
] | 8
|
2019-03-26T23:08:40.000Z
|
2022-01-10T03:39:59.000Z
|
from ._kaa import Easing, ease, ease_between
__all__ = ('Easing', 'ease', 'ease_between')
| 18.4
| 44
| 0.706522
| 12
| 92
| 4.833333
| 0.583333
| 0.344828
| 0.482759
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 4
| 45
| 23
| 0.734177
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0b2cc00f176773a4dd06f4ddfa29f1401824de8f
| 18,467
|
py
|
Python
|
test/test_unit/test_cli.py
|
davidmreed/amaxa
|
b850c39b48b6076d412f3bcab0404f27d52b1c4f
|
[
"BSD-3-Clause"
] | 52
|
2019-02-13T20:43:02.000Z
|
2022-03-22T17:45:51.000Z
|
test/test_unit/test_cli.py
|
davidmreed/amaxa
|
b850c39b48b6076d412f3bcab0404f27d52b1c4f
|
[
"BSD-3-Clause"
] | 50
|
2019-04-21T13:09:15.000Z
|
2022-01-01T17:39:19.000Z
|
test/test_unit/test_cli.py
|
davidmreed/amaxa
|
b850c39b48b6076d412f3bcab0404f27d52b1c4f
|
[
"BSD-3-Clause"
] | 13
|
2019-03-20T09:14:02.000Z
|
2021-10-06T13:53:37.000Z
|
import io
import json
import unittest
from unittest.mock import Mock
import yaml
import amaxa
from amaxa import constants
from amaxa.__main__ import main
CREDENTIALS_GOOD_YAML = """
version: 1
credentials:
username: 'test@example.com'
password: 'blah'
security-token: '00000'
sandbox: True
"""
CREDENTIALS_GOOD_JSON = """
{
"version": 1,
"credentials": {
"username": "test@example.com",
"password": "blah",
"security-token": "00000",
"sandbox": true
}
}
"""
CREDENTIALS_BAD = """
credentials:
username: 'test@example.com'
password: 'blah'
security-token: '00000'
sandbox: True
"""
EXTRACTION_GOOD_YAML = """
version: 1
operation:
-
sobject: Account
fields:
- Name
- Id
- ParentId
extract:
all: True
"""
EXTRACTION_GOOD_JSON = """
{
"version": 1,
"extraction": [
{
"sobject": "Account",
"fields": [
"Name",
"Id",
"ParentId"
],
"extract": {
"all": true
}
}
]
}
"""
EXTRACTION_GOOD_YAML_API_VERSION = """
version: 2
options:
api-version: "45.0"
operation:
-
sobject: Account
fields:
- Name
- Id
- ParentId
extract:
all: True
"""
EXTRACTION_BAD_YAML_API_VERSION = """
version: 2
options:
api-version: 45
operation:
-
sobject: Account
fields:
- Name
- Id
- ParentId
extract:
all: True
"""
EXTRACTION_BAD = """
operation:
-
sobject: Account
fields:
- Name
- Id
- ParentId
extract:
all: True
"""
STATE_GOOD_YAML = """
version: 1
state:
stage: inserts
id-map:
'001000000000001': '001000000000002'
'001000000000003': '001000000000004'
"""
state_file = io.StringIO()
def select_file(f, *args, **kwargs):
data = {
"credentials-bad.yaml": CREDENTIALS_BAD,
"extraction-bad.yaml": EXTRACTION_BAD,
"extraction-good.yaml": EXTRACTION_GOOD_YAML,
"extraction-good-api.yaml": EXTRACTION_GOOD_YAML_API_VERSION,
"extraction-bad-api.yaml": EXTRACTION_BAD_YAML_API_VERSION,
"credentials-good.yaml": CREDENTIALS_GOOD_YAML,
"credentials-good.json": CREDENTIALS_GOOD_JSON,
"extraction-good.json": EXTRACTION_GOOD_JSON,
"state-good.yaml": STATE_GOOD_YAML,
"extraction-good.state.yaml": state_file,
}
if type(data[f]) is str:
m = unittest.mock.mock_open(read_data=data[f])(f, *args, **kwargs)
m.name = f
else:
m = data[f]
return m
class test_CLI(unittest.TestCase):
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_calls_execute_with_json_input_extract_mode(
self, operation_mock, credential_mock
):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
["amaxa", "-c", "credentials-good.json", "extraction-good.json"],
):
return_value = main()
credential_mock.assert_called_once_with(
json.loads(CREDENTIALS_GOOD_JSON), constants.OPTION_DEFAULTS["api-version"]
)
operation_mock.assert_called_once_with(
json.loads(EXTRACTION_GOOD_JSON), context
)
context.run.assert_called_once_with()
self.assertEqual(0, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.LoadOperationLoader")
def test_main_calls_execute_with_json_input_load_mode(
self, operation_mock, credential_mock
):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.json",
"--load",
"extraction-good.json",
],
):
return_value = main()
credential_mock.assert_called_once_with(
json.loads(CREDENTIALS_GOOD_JSON), constants.OPTION_DEFAULTS["api-version"]
)
operation_mock.assert_called_once_with(
json.loads(EXTRACTION_GOOD_JSON), context, use_state=False
)
context.run.assert_called_once_with()
self.assertEqual(0, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_calls_execute_with_yaml_input(self, operation_mock, credential_mock):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
["amaxa", "-c", "credentials-good.yaml", "extraction-good.yaml"],
):
return_value = main()
credential_mock.assert_called_once_with(
yaml.safe_load(CREDENTIALS_GOOD_YAML),
constants.OPTION_DEFAULTS["api-version"],
)
operation_mock.assert_called_once_with(
yaml.safe_load(EXTRACTION_GOOD_YAML), context
)
context.run.assert_called_once_with()
self.assertEqual(0, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_returns_error_with_bad_credentials(
self, operation_mock, credential_mock
):
context = Mock()
credential_mock.return_value = Mock()
credential_mock.return_value.result = None
credential_mock.return_value.errors = ["Test error occured."]
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
["amaxa", "-c", "credentials-bad.yaml", "extraction-good.yaml"],
):
return_value = main()
credential_mock.assert_called_once_with(
yaml.safe_load(CREDENTIALS_BAD), constants.OPTION_DEFAULTS["api-version"]
)
context.run.assert_not_called()
self.assertEqual(-1, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_returns_error_with_bad_extraction(
self, operation_mock, credential_mock
):
context = Mock()
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = None
operation_mock.return_value.errors = ["Test error occured."]
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
["amaxa", "-c", "credentials-good.yaml", "extraction-bad.yaml"],
):
return_value = main()
credential_mock.assert_called_once_with(
yaml.safe_load(CREDENTIALS_GOOD_YAML),
constants.OPTION_DEFAULTS["api-version"],
)
operation_mock.assert_called_once_with(yaml.safe_load(EXTRACTION_BAD), context)
self.assertEqual(-1, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.StateLoader")
@unittest.mock.patch("amaxa.__main__.LoadOperationLoader")
def test_main_returns_error_with_bad_state_file(
self, operation_mock, state_mock, credential_mock
):
credential_mock.return_value.errors = []
operation_mock.return_value.errors = []
state_mock.return_value.result = None
state_mock.return_value.errors = ["Test error occured."]
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"--load",
"-c",
"credentials-good.yaml",
"extraction-good.yaml",
"-s",
"state-good.yaml",
],
):
return_value = main()
credential_mock.assert_called_once_with(
yaml.safe_load(CREDENTIALS_GOOD_YAML),
constants.OPTION_DEFAULTS["api-version"],
)
state_mock.assert_called_once_with(
yaml.safe_load(STATE_GOOD_YAML), operation_mock.return_value.result
)
self.assertEqual(-1, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_returns_error_with_errors_during_extraction(
self, operation_mock, credential_mock
):
context = Mock()
op = Mock()
op.run = Mock(return_value=-1)
op.stage = amaxa.LoadStage.INSERTS
op.global_id_map = {}
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = op
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
["amaxa", "-c", "credentials-good.yaml", "extraction-good.yaml"],
):
return_value = main()
self.assertEqual(-1, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.LoadOperationLoader")
def test_main_saves_state_on_error(self, operation_mock, credential_mock):
context = Mock()
op = Mock()
op.run = Mock(return_value=-1)
op.stage = amaxa.LoadStage.INSERTS
op.global_id_map = {
amaxa.SalesforceId("001000000000001"): amaxa.SalesforceId("001000000000002")
}
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = op
operation_mock.return_value.errors = []
state_file.close = Mock()
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.yaml",
"--load",
"extraction-good.yaml",
],
):
return_value = main()
self.assertEqual(-1, return_value)
contents = state_file.getvalue()
self.assertLess(0, len(contents))
state_file.close.assert_called_once_with()
yaml_state = yaml.safe_load(io.StringIO(contents))
self.assertIn("state", yaml_state)
self.assertIn("id-map", yaml_state["state"])
self.assertIn("stage", yaml_state["state"])
self.assertEqual(amaxa.LoadStage.INSERTS.value, yaml_state["state"]["stage"])
self.assertEqual(
{str(k): str(v) for k, v in op.global_id_map.items()},
yaml_state["state"]["id-map"],
)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.LoadOperationLoader")
def test_main_loads_state_with_use_state_option(
self, operation_mock, credential_mock
):
context = Mock()
op = Mock()
op.run = Mock(return_value=0)
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = op
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.yaml",
"--load",
"extraction-good.yaml",
"--use-state",
"state-good.yaml",
],
):
return_value = main()
self.assertEqual(0, return_value)
self.assertEqual(amaxa.LoadStage.INSERTS, op.stage)
self.assertEqual(
{
amaxa.SalesforceId("001000000000001"): amaxa.SalesforceId(
"001000000000002"
),
amaxa.SalesforceId("001000000000003"): amaxa.SalesforceId(
"001000000000004"
),
},
op.global_id_map,
)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_stops_with_check_only(self, operation_mock, credential_mock):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.json",
"extraction-good.json",
"--check-only",
],
):
return_value = main()
credential_mock.assert_called_once_with(
json.loads(CREDENTIALS_GOOD_JSON), constants.OPTION_DEFAULTS["api-version"]
)
operation_mock.assert_called_once_with(
json.loads(EXTRACTION_GOOD_JSON), context
)
context.run.assert_not_called()
self.assertEqual(0, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_uses_specified_api_version(self, operation_mock, credential_mock):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.yaml",
"extraction-good-api.yaml",
],
):
return_value = main()
credential_mock.assert_called_once_with(
yaml.safe_load(CREDENTIALS_GOOD_YAML), "45.0"
)
operation_mock.assert_called_once_with(
yaml.safe_load(EXTRACTION_GOOD_YAML_API_VERSION), context
)
self.assertEqual(0, return_value)
@unittest.mock.patch("amaxa.__main__.CredentialLoader")
@unittest.mock.patch("amaxa.__main__.ExtractionOperationLoader")
def test_main_errors_bad_api_version(self, operation_mock, credential_mock):
context = Mock()
context.run.return_value = 0
credential_mock.return_value = Mock()
credential_mock.return_value.result = context
credential_mock.return_value.errors = []
operation_mock.return_value = Mock()
operation_mock.return_value.result = context
operation_mock.return_value.errors = []
m = Mock(side_effect=select_file)
with unittest.mock.patch("builtins.open", m):
with unittest.mock.patch(
"sys.argv",
[
"amaxa",
"-c",
"credentials-good.yaml",
"extraction-bad-api.yaml",
],
):
return_value = main()
credential_mock.assert_not_called()
operation_mock.assert_not_called()
self.assertEqual(-1, return_value)
| 32.569665
| 88
| 0.592679
| 1,872
| 18,467
| 5.528312
| 0.070513
| 0.110542
| 0.107257
| 0.081167
| 0.875737
| 0.837955
| 0.816697
| 0.802106
| 0.783554
| 0.764905
| 0
| 0.015636
| 0.300428
| 18,467
| 566
| 89
| 32.627208
| 0.785432
| 0
| 0
| 0.688
| 0
| 0
| 0.206368
| 0.067526
| 0
| 0
| 0
| 0
| 0.086
| 1
| 0.026
| false
| 0.006
| 0.016
| 0
| 0.046
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b7c12d02a15bdeed63c4784603821ec276ca3fa
| 42
|
py
|
Python
|
mayan/apps/rest_api/tests/__init__.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/rest_api/tests/__init__.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 15
|
2020-06-06T00:00:48.000Z
|
2022-03-12T00:03:54.000Z
|
mayan/apps/rest_api/tests/__init__.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from .base import BaseAPITestCase # NOQA
| 21
| 41
| 0.785714
| 5
| 42
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 1
| 42
| 42
| 0.942857
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0baf1e294200a821525292f171cf516e7b580174
| 191
|
py
|
Python
|
project/recipes/__init__.py
|
Soumyajit7/recipe-app
|
4e76842e052b2b04d7ff936953b5ecdcde41f77b
|
[
"BSD-2-Clause"
] | null | null | null |
project/recipes/__init__.py
|
Soumyajit7/recipe-app
|
4e76842e052b2b04d7ff936953b5ecdcde41f77b
|
[
"BSD-2-Clause"
] | null | null | null |
project/recipes/__init__.py
|
Soumyajit7/recipe-app
|
4e76842e052b2b04d7ff936953b5ecdcde41f77b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
The `recipes` blueprint handles displaying recipes.
"""
from flask import Blueprint
recipes_blueprint = Blueprint('recipes', __name__, template_folder='templates')
from . import routes
| 21.222222
| 79
| 0.774869
| 21
| 191
| 6.761905
| 0.619048
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120419
| 191
| 8
| 80
| 23.875
| 0.845238
| 0.267016
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0bb9b2148d34cd7b64c3babd3b6a8c4ff63c98c3
| 47
|
py
|
Python
|
solid_backend/content/tests/conftest.py
|
zentrumnawi/solid-backend
|
0a6ac51608d4c713903856bb9b0cbf0068aa472c
|
[
"MIT"
] | 1
|
2021-01-24T11:54:01.000Z
|
2021-01-24T11:54:01.000Z
|
solid_backend/quiz/tests/conftest.py
|
zentrumnawi/solid-backend
|
0a6ac51608d4c713903856bb9b0cbf0068aa472c
|
[
"MIT"
] | 112
|
2020-04-22T10:07:03.000Z
|
2022-03-29T15:25:26.000Z
|
solid_backend/slideshow/tests/conftest.py
|
zentrumnawi/solid-backend
|
0a6ac51608d4c713903856bb9b0cbf0068aa472c
|
[
"MIT"
] | null | null | null |
from .conftest_files.general_conftest import *
| 23.5
| 46
| 0.851064
| 6
| 47
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7f119ad576e69550a7d197caba29e3f05d8cdf1
| 3,040
|
py
|
Python
|
streetteam/apps/twilio_integration/tests/views/twilio_webhook_test.py
|
alysivji/street-team
|
fe891d738b449956d56fe5e53535b98fa04d9a3a
|
[
"MIT"
] | 2
|
2020-01-22T17:49:10.000Z
|
2021-06-18T19:35:23.000Z
|
streetteam/apps/twilio_integration/tests/views/twilio_webhook_test.py
|
alysivji/street-team
|
fe891d738b449956d56fe5e53535b98fa04d9a3a
|
[
"MIT"
] | 41
|
2019-11-08T18:28:16.000Z
|
2022-03-12T00:28:51.000Z
|
streetteam/apps/twilio_integration/tests/views/twilio_webhook_test.py
|
alysivji/street-team
|
fe891d738b449956d56fe5e53535b98fa04d9a3a
|
[
"MIT"
] | null | null | null |
import json
from django.conf import settings
import pytest
from twilio.request_validator import RequestValidator
@pytest.fixture
def create_twilio_headers():
    """Return a callable that signs (uri, data) the way Twilio would."""
    validator = RequestValidator(settings.TWILIO_AUTH_TOKEN)

    def _sign(uri, data):
        signature = validator.compute_signature(uri, data)
        return {"HTTP_X_TWILIO_SIGNATURE": signature}

    return _sign
@pytest.mark.django_db
def test_send_SMS__receive_error_message(client, create_twilio_headers):
    """A plain SMS payload (no pictures) gets the generic error response."""
    fixture = (
        "streetteam/apps/twilio_integration/tests/files/twilio_webhook__send_sms.json"
    )
    with open(fixture, "r") as fh:
        payload = json.load(fh)
    callback_uri = "http://testserver/sms/twilio/callback/"
    signed_headers = create_twilio_headers(callback_uri, payload)

    response = client.post(callback_uri, data=payload, **signed_headers)

    assert b"Something went wrong" in response.getvalue()
@pytest.mark.django_db
def test_send_1_picture_MMS__receive_thank_you_message(client, create_twilio_headers):
    """An MMS with one attached picture is acknowledged with a thank-you."""
    fixture = (
        "streetteam/apps/twilio_integration/tests/files/twilio_webhook__attach_1_picture.json"
    )
    with open(fixture, "r") as fh:
        payload = json.load(fh)
    callback_uri = "http://testserver/sms/twilio/callback/"
    signed_headers = create_twilio_headers(callback_uri, payload)

    response = client.post(callback_uri, data=payload, **signed_headers)

    assert b"Received 1 picture(s)! Thank you!" in response.getvalue()
@pytest.mark.django_db
def test_send_3_picture_MMS__receive_thank_you_message(client, create_twilio_headers):
    """An MMS with three attached pictures is acknowledged with a thank-you."""
    fixture = (
        "streetteam/apps/twilio_integration/tests/files/twilio_webhook__attach_3_pictures.json"
    )
    with open(fixture, "r") as fh:
        payload = json.load(fh)
    callback_uri = "http://testserver/sms/twilio/callback/"
    signed_headers = create_twilio_headers(callback_uri, payload)

    response = client.post(callback_uri, data=payload, **signed_headers)

    assert b"Received 3 picture(s)! Thank you!" in response.getvalue()
@pytest.mark.django_db
def test_send_5_picture_MMS__receive_thank_you_message(client, create_twilio_headers):
    """An MMS with five attached pictures is acknowledged with a thank-you."""
    fixture = (
        "streetteam/apps/twilio_integration/tests/files/twilio_webhook__attach_5_pictures.json"
    )
    with open(fixture, "r") as fh:
        payload = json.load(fh)
    callback_uri = "http://testserver/sms/twilio/callback/"
    signed_headers = create_twilio_headers(callback_uri, payload)

    response = client.post(callback_uri, data=payload, **signed_headers)

    assert b"Received 5 picture(s)! Thank you!" in response.getvalue()
@pytest.mark.django_db
def test_send_6_picture_MMS__receive_error_message(client, create_twilio_headers):
    """An MMS with six attached pictures gets the generic error response."""
    fixture = (
        "streetteam/apps/twilio_integration/tests/files/twilio_webhook__attach_6_pictures.json"
    )
    with open(fixture, "r") as fh:
        payload = json.load(fh)
    callback_uri = "http://testserver/sms/twilio/callback/"
    signed_headers = create_twilio_headers(callback_uri, payload)

    response = client.post(callback_uri, data=payload, **signed_headers)

    assert b"Something went wrong" in response.getvalue()
| 31.666667
| 102
| 0.732237
| 412
| 3,040
| 5.126214
| 0.177184
| 0.039773
| 0.098958
| 0.042614
| 0.85464
| 0.85464
| 0.85464
| 0.840909
| 0.840909
| 0.840909
| 0
| 0.004319
| 0.162171
| 3,040
| 95
| 103
| 32
| 0.824892
| 0.030921
| 0
| 0.581818
| 0
| 0
| 0.263481
| 0.149488
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.127273
| false
| 0
| 0.072727
| 0.018182
| 0.236364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f03138b205d86184eb9589af056fb6e264be81b3
| 90
|
py
|
Python
|
dja/routers.py
|
mcovalt/dja
|
b0c852c4941a805cd6aa8a6d2aca6d332ba41c7d
|
[
"MIT"
] | 1
|
2020-09-11T16:12:58.000Z
|
2020-09-11T16:12:58.000Z
|
dja/routers.py
|
mcovalt/dja
|
b0c852c4941a805cd6aa8a6d2aca6d332ba41c7d
|
[
"MIT"
] | null | null | null |
dja/routers.py
|
mcovalt/dja
|
b0c852c4941a805cd6aa8a6d2aca6d332ba41c7d
|
[
"MIT"
] | null | null | null |
from rest_framework import routers
class ResourceRouter(routers.SimpleRouter):
    """Router for resource viewsets; currently identical to DRF's SimpleRouter."""
    pass
| 15
| 43
| 0.811111
| 10
| 90
| 7.2
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 90
| 5
| 44
| 18
| 0.935065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f049a43cabf158409966078e0f35e552167f69c7
| 249
|
py
|
Python
|
robovat/math/__init__.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 62
|
2020-04-08T11:26:24.000Z
|
2021-09-06T02:45:53.000Z
|
robovat/math/__init__.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 7
|
2020-04-12T13:10:10.000Z
|
2022-03-12T00:15:03.000Z
|
robovat/math/__init__.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 17
|
2020-04-12T17:37:01.000Z
|
2021-09-07T01:51:46.000Z
|
from robovat.math.euler import Euler
from robovat.math.orientation import Orientation
from robovat.math.point import Point
from robovat.math.pose import get_transform
from robovat.math.pose import Pose
from robovat.math.quaternion import Quaternion
| 35.571429
| 48
| 0.855422
| 37
| 249
| 5.72973
| 0.297297
| 0.311321
| 0.424528
| 0.179245
| 0.235849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 249
| 6
| 49
| 41.5
| 0.942222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f07eb752abda2a39af7758379af46614f49b4162
| 24
|
py
|
Python
|
dk/__init__.py
|
therj/ulauncher-docker
|
1cd3bc3b386197c48713b9973ce3504855b1770b
|
[
"MIT"
] | null | null | null |
dk/__init__.py
|
therj/ulauncher-docker
|
1cd3bc3b386197c48713b9973ce3504855b1770b
|
[
"MIT"
] | null | null | null |
dk/__init__.py
|
therj/ulauncher-docker
|
1cd3bc3b386197c48713b9973ce3504855b1770b
|
[
"MIT"
] | null | null | null |
from dk.client import *
| 12
| 23
| 0.75
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2b7bbd8d4446043806a3c378f477587eae313c3
| 161
|
py
|
Python
|
ezcliy/__init__.py
|
kpostekk/ezcliy
|
f3038a4e1d482895a311bfb699d3c04c6975faea
|
[
"MIT"
] | null | null | null |
ezcliy/__init__.py
|
kpostekk/ezcliy
|
f3038a4e1d482895a311bfb699d3c04c6975faea
|
[
"MIT"
] | 2
|
2021-06-02T03:52:32.000Z
|
2021-08-19T21:26:03.000Z
|
ezcliy/__init__.py
|
kpostekk/ezcliy
|
f3038a4e1d482895a311bfb699d3c04c6975faea
|
[
"MIT"
] | null | null | null |
"""Framework for creating cli tools."""
from ezcliy.commands import Command
from ezcliy.parameters import Flag, KeyVal
from ezcliy.positional import Positional
| 26.833333
| 42
| 0.813665
| 21
| 161
| 6.238095
| 0.666667
| 0.229008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118012
| 161
| 5
| 43
| 32.2
| 0.922535
| 0.204969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2e12777bb435b609bada846f13d3b7aac1c6697
| 11,948
|
py
|
Python
|
tests/flickr/test_management_commands.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 54
|
2016-08-15T17:32:41.000Z
|
2022-02-27T03:32:05.000Z
|
tests/flickr/test_management_commands.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 229
|
2015-07-23T12:50:47.000Z
|
2022-03-24T10:33:20.000Z
|
tests/flickr/test_management_commands.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 8
|
2015-09-10T17:10:35.000Z
|
2022-03-25T13:05:01.000Z
|
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from ditto.flickr.factories import AccountFactory, UserFactory
class FetchFlickrAccountUserTestCase(TestCase):
    """Tests for the ``fetch_flickr_account_user`` management command."""

    def setUp(self):
        """Prepare canned fetcher results, an Account, and output buffers."""
        # What we'll use as return values from UserIdFetcher().fetch()...
        self.id_fetcher_success = {
            "success": True,
            "id": "99999999999@N99",
            "fetched": 1,
        }
        # ...and UserFetcher().fetch():
        self.user_fetcher_success = {
            "success": True,
            "user": {"name": "Phil Gyford"},
            "fetched": 1,
        }
        self.account = AccountFactory(id=32, user=None)
        self.out = StringIO()      # captures the command's stdout
        self.out_err = StringIO()  # captures the command's stderr

    def test_fail_with_no_args(self):
        """The command requires an id argument."""
        with self.assertRaises(CommandError):
            call_command("fetch_flickr_account_user")

    def test_fail_with_invalid_id(self):
        """An id matching no Account reports an error on stderr."""
        call_command("fetch_flickr_account_user", id="3", stderr=self.out_err)
        self.assertIn("No Account found with an id of '3'", self.out_err.getvalue())

    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
    def test_with_id(self, id_fetcher, user_fetcher):
        """A valid Account id fetches and saves the matching Flickr user."""
        UserFactory(nsid="99999999999@N99")
        id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
        user_fetcher.return_value.fetch.return_value = self.user_fetcher_success
        call_command("fetch_flickr_account_user", id="32", stdout=self.out)
        self.assertIn("Fetched and saved user 'Phil Gyford'", self.out.getvalue())

    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
    def test_invalid_nsid(self, id_fetcher, user_fetcher):
        """
        Correct error message if we fail to find a user for the fetched
        Flickr ID (unlikely).
        """
        id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
        user_fetcher.return_value.fetch.return_value = {
            "success": False,
            "messages": ["Oops"],
        }
        call_command("fetch_flickr_account_user", id="32", stderr=self.out_err)
        self.assertIn(
            "Failed to fetch a user using Flickr ID '99999999999@N99': Oops",
            self.out_err.getvalue(),
        )

    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
    def test_no_matching_nsid(self, id_fetcher):
        "Correct error message if we can't find a Flickr ID for this Account."
        id_fetcher.return_value.fetch.return_value = {
            "success": False,
            "messages": ["Oops"],
        }
        call_command("fetch_flickr_account_user", id="32", stderr=self.out_err)
        self.assertIn(
            "Failed to fetch a Flickr ID for this Account: Oops",
            self.out_err.getvalue(),
        )

    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
    @patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
    def test_associates_account_with_user(self, id_fetcher, user_fetcher):
        "After fetching and saving the user, associate it with the Account."
        UserFactory(nsid="99999999999@N99")
        id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
        user_fetcher.return_value.fetch.return_value = self.user_fetcher_success
        call_command("fetch_flickr_account_user", id="32", stdout=self.out)
        self.account.refresh_from_db()
        self.assertEqual(self.account.user.nsid, "99999999999@N99")
class FetchFlickrOriginalsTestCase(TestCase):
    """Tests for the ``fetch_flickr_originals`` management command."""

    def setUp(self):
        """Create buffers that capture the command's stdout/stderr."""
        self.out = StringIO()
        self.out_err = StringIO()

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_all_true_to_fetcher_with_account(self, fetcher):
        """--all plus an account is passed through to the fetcher."""
        call_command("fetch_flickr_originals", "--all", account="99999999999@N99")
        fetcher.assert_called_with(nsid="99999999999@N99")
        fetcher.return_value.fetch.assert_called_with(fetch_all=True)

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_all_true_to_fetcher_no_account(self, fetcher):
        """--all with no account is passed through with nsid=None."""
        call_command("fetch_flickr_originals", "--all")
        fetcher.assert_called_with(nsid=None)
        fetcher.return_value.fetch.assert_called_with(fetch_all=True)

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_all_false_to_fetcher(self, fetcher):
        """Without --all the fetcher is invoked with fetch_all=False."""
        call_command("fetch_flickr_originals")
        fetcher.assert_called_with(nsid=None)
        fetcher.return_value.fetch.assert_called_with(fetch_all=False)

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output(self, fetcher):
        """A successful fetch result is summarised on stdout."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": 33}
        ]
        call_command("fetch_flickr_originals", stdout=self.out)
        self.assertIn("Phil Gyford: Fetched 33 Files", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output_verbosity_0(self, fetcher):
        """verbosity=0 silences the success summary."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": 33}
        ]
        call_command("fetch_flickr_originals", verbosity=0, stdout=self.out)
        self.assertEqual("", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher"  # noqa: E501
    )
    def test_error_output(self, fetcher):
        """A failed fetch result reports its messages on stderr."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": False, "messages": ["Oops"]}
        ]
        call_command("fetch_flickr_originals", stdout=self.out, stderr=self.out_err)
        self.assertIn(
            "Phil Gyford: Failed to fetch Files: Oops", self.out_err.getvalue()
        )
class FetchFlickrPhotosTestCase(TestCase):
    """Tests for the ``fetch_flickr_photos`` management command."""

    def setUp(self):
        """Create buffers that capture the command's stdout/stderr."""
        self.out = StringIO()
        self.out_err = StringIO()

    def test_fail_with_no_args(self):
        """The command requires a days argument."""
        with self.assertRaises(CommandError):
            call_command("fetch_flickr_photos")

    def test_fail_with_account_only(self):
        """An account without days is still an error."""
        with self.assertRaises(CommandError):
            call_command("fetch_flickr_photos", account="99999999999@N99")

    def test_fail_with_non_numeric_days(self):
        """days must be a number (or 'all', tested separately)."""
        with self.assertRaises(CommandError):
            call_command("fetch_flickr_photos", days="foo")

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_days_to_fetcher_with_account(self, fetcher):
        """days is converted to int and passed with the account's nsid."""
        call_command("fetch_flickr_photos", account="99999999999@N99", days="4")
        fetcher.assert_called_with(nsid="99999999999@N99")
        fetcher.return_value.fetch.assert_called_with(days=4)

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_days_to_fetcher_no_account(self, fetcher):
        """With no account, the fetcher is invoked with nsid=None."""
        call_command("fetch_flickr_photos", days="4")
        fetcher.assert_called_with(nsid=None)
        fetcher.return_value.fetch.assert_called_with(days=4)

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_sends_all_to_fetcher_with_account(self, fetcher):
        """days='all' is passed through as the string 'all', not an int."""
        call_command("fetch_flickr_photos", account="99999999999@N99", days="all")
        fetcher.assert_called_with(nsid="99999999999@N99")
        fetcher.return_value.fetch.assert_called_with(days="all")

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output(self, fetcher):
        """A successful fetch result is summarised on stdout."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": "40"}
        ]
        call_command("fetch_flickr_photos", days="4", stdout=self.out)
        self.assertIn("Phil Gyford: Fetched 40 Photos", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output_verbosity_0(self, fetcher):
        """verbosity=0 silences the success summary."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": "40"}
        ]
        call_command("fetch_flickr_photos", days="4", verbosity=0, stdout=self.out)
        self.assertEqual("", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher"  # noqa: E501
    )
    def test_error_output(self, fetcher):
        """A failed fetch result reports its messages on stderr."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": False, "messages": ["Oops"]}
        ]
        call_command(
            "fetch_flickr_photos", days="4", stdout=self.out, stderr=self.out_err
        )
        self.assertIn(
            "Phil Gyford: Failed to fetch Photos: Oops", self.out_err.getvalue()
        )
class FetchFlickrPhotosetsTestCase(TestCase):
    """Tests for the ``fetch_flickr_photosets`` management command."""

    def setUp(self):
        """Create buffers that capture the command's stdout/stderr."""
        self.out = StringIO()
        self.out_err = StringIO()

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher"  # noqa: E501
    )
    def test_calls_fetcher_with_account(self, fetcher):
        """An account option is passed through as the fetcher's nsid."""
        call_command("fetch_flickr_photosets", account="99999999999@N99")
        fetcher.assert_called_with(nsid="99999999999@N99")
        fetcher.return_value.fetch.assert_called_with()

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher"  # noqa: E501
    )
    def test_calls_fetcher_with_no_account(self, fetcher):
        """With no account, the fetcher is invoked with nsid=None."""
        call_command("fetch_flickr_photosets")
        fetcher.assert_called_with(nsid=None)
        fetcher.return_value.fetch.assert_called_with()

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output(self, fetcher):
        """A successful fetch result is summarised on stdout."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": "40"}
        ]
        call_command("fetch_flickr_photosets", stdout=self.out)
        self.assertIn("Phil Gyford: Fetched 40 Photosets", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher"  # noqa: E501
    )
    def test_success_output_verbosity_0(self, fetcher):
        """verbosity=0 silences the success summary."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": True, "fetched": "40"}
        ]
        call_command("fetch_flickr_photosets", verbosity=0, stdout=self.out)
        self.assertEqual("", self.out.getvalue())

    @patch(
        "ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher"  # noqa: E501
    )
    def test_error_output(self, fetcher):
        """A failed fetch result reports its messages on stderr."""
        fetcher.return_value.fetch.return_value = [
            {"account": "Phil Gyford", "success": False, "messages": ["Oops"]}
        ]
        call_command("fetch_flickr_photosets", stdout=self.out, stderr=self.out_err)
        self.assertIn(
            "Phil Gyford: Failed to fetch Photosets: Oops", self.out_err.getvalue()
        )
| 42.671429
| 112
| 0.687981
| 1,375
| 11,948
| 5.714182
| 0.093091
| 0.070001
| 0.052946
| 0.072801
| 0.866743
| 0.854652
| 0.828179
| 0.81736
| 0.798651
| 0.78096
| 0
| 0.029122
| 0.201038
| 11,948
| 279
| 113
| 42.824373
| 0.793945
| 0.042099
| 0
| 0.54661
| 0
| 0
| 0.318446
| 0.203105
| 0
| 0
| 0
| 0
| 0.144068
| 1
| 0.127119
| false
| 0
| 0.025424
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
650893800db219b4887551171cb9d9294edf2019
| 180
|
py
|
Python
|
pylib/__init__.py
|
Zig1375/CycleGAN-Tensorflow-2
|
7a10f31c5f093c861d273e1414dcf13d278026c4
|
[
"MIT"
] | 581
|
2018-05-06T05:15:05.000Z
|
2022-03-29T08:13:54.000Z
|
pylib/__init__.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 52
|
2018-05-11T09:33:30.000Z
|
2022-03-24T04:27:07.000Z
|
pylib/__init__.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 137
|
2018-05-08T14:30:03.000Z
|
2022-02-24T01:50:37.000Z
|
from pylib.argument import *
from pylib.processing import *
from pylib.path import *
from pylib.serialization import *
from pylib.timer import *
import pprint

# Convenience alias so callers can write pylib.pp(...) for pretty-printing.
pp = pprint.pprint
| 18
| 33
| 0.783333
| 25
| 180
| 5.64
| 0.4
| 0.319149
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 180
| 9
| 34
| 20
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0.285714
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6541447edd813cde8bcd271bda8522e1fbbdda1b
| 4,878
|
py
|
Python
|
norns/status/tests/test_models.py
|
the-norns/norns
|
8856626fb6937452c123e4629a5888a49a82c349
|
[
"MIT"
] | null | null | null |
norns/status/tests/test_models.py
|
the-norns/norns
|
8856626fb6937452c123e4629a5888a49a82c349
|
[
"MIT"
] | 62
|
2018-05-19T22:18:01.000Z
|
2018-05-26T00:13:21.000Z
|
norns/status/tests/test_models.py
|
the-norns/norns
|
8856626fb6937452c123e4629a5888a49a82c349
|
[
"MIT"
] | 3
|
2018-05-19T18:54:28.000Z
|
2018-05-21T02:14:47.000Z
|
from django.test import TestCase
from model_mommy import mommy
from enemy.models import Enemy
from player.models import Player
from room.models import Room
from ..models import Ability
class TestModelsAbility(TestCase):
    """
    Test models.
    """
    fixtures = [
        'status/fixtures/fixture.json',
        'fixture',
    ]
    def setUp(self):
        """
        Create items.
        """
        self.player = mommy.make(Player)
        room = self.player.tile.room
        self.enemy = mommy.make(Enemy, tile=self.player.tile)
        # Surround the player's room in all four directions so abilities that
        # look into adjacent rooms have somewhere to search.
        mommy.make(Room, room_north=room)
        mommy.make(Room, room_south=room)
        mommy.make(Room, room_east=room)
        mommy.make(Room, room_west=room)
    def tearDown(self):
        """
        Destroy items.
        """
        Enemy.objects.all().delete()
        Player.objects.all().delete()
    def test_run_safe(self):
        """
        Test ability to use actions.
        """
        safe = Ability.objects.filter(action='SR').first()
        self.assertEqual(safe.use_ability(self.player, None), 'You used Safe')
        # The 'SR' ability removed the enemy sharing the player's tile.
        self.assertIsNone(Enemy.objects.filter(pk=self.enemy.pk).first())
    def test_run_safe_room_north(self):
        """
        Test ability to use actions.
        """
        safe = Ability.objects.filter(action='SR').first()
        # NOTE(review): the new tile is assigned but never save()d — confirm
        # use_ability reads the in-memory instance, otherwise this test may
        # not exercise the cross-room path it appears to.
        self.enemy.tile = (
            self.player.tile.room.room_north.tile_set.order_by('?').first())
        self.assertEqual(safe.use_ability(self.player, None), 'You used Safe')
        self.assertIsNone(Enemy.objects.filter(pk=self.enemy.pk).first())
    def test_run_out_of_room(self):
        """
        Test ability to use actions.
        """
        ability = Ability.objects.order_by('?').first()
        # Move the enemy east, out of the player's room; the ability should
        # then find no target. (Same unsaved-assignment caveat as above.)
        self.enemy.tile = (
            self.player.tile.room.room_east.tile_set.order_by('?').first())
        self.assertEqual(
            ability.use_ability(self.player, self.enemy),
            'No target found.')
class TestModels(TestCase):
    """
    Test models.

    Smoke-tests every fixture-loaded Ability (pks 1-11) by exercising it
    against a freshly created player/enemy pair.
    """
    fixtures = [
        'status/fixtures/fixture.json',
        'fixture',
    ]
    def setUp(self):
        """
        Create items.
        """
        self.player = mommy.make(Player)
        self.enemy = mommy.make(Enemy, tile=self.player.tile)
    def tearDown(self):
        """
        Destroy items.
        """
        Enemy.objects.all().delete()
        Player.objects.all().delete()
    def _use_ability(self, pk):
        """Fetch the Ability with ``pk`` and use it on the player vs. the enemy."""
        ability = Ability.objects.get(pk=pk)
        self.assertIsNotNone(ability)
        ability.use_ability(self.player, self.enemy)
    def test_run_1(self):
        """
        Test ability to use actions.
        """
        self._use_ability(1)
    def test_run_2(self):
        """
        Test ability to use actions.
        """
        self._use_ability(2)
    def test_run_3(self):
        """
        Test ability to use actions.
        """
        self._use_ability(3)
    def test_run_4(self):
        """
        Test ability to use actions.
        """
        self._use_ability(4)
    def test_run_5(self):
        """
        Test ability to use actions.
        """
        self._use_ability(5)
    def test_run_6(self):
        """
        Test ability to use actions.
        """
        self._use_ability(6)
    def test_run_7(self):
        """
        Test ability to use actions.
        """
        self._use_ability(7)
    def test_run_8(self):
        """
        Test ability to use actions.
        """
        self._use_ability(8)
    def test_run_9(self):
        """
        Test ability to use actions.
        """
        self._use_ability(9)
    def test_run_10(self):
        """
        Test ability to use actions.
        """
        self._use_ability(10)
    def test_run_11(self):
        """
        Test ability to use actions.
        """
        self._use_ability(11)
| 26.950276
| 78
| 0.583231
| 566
| 4,878
| 4.924028
| 0.125442
| 0.115536
| 0.050233
| 0.085396
| 0.880517
| 0.857912
| 0.857912
| 0.827772
| 0.827772
| 0.755651
| 0
| 0.007532
| 0.292333
| 4,878
| 180
| 79
| 27.1
| 0.799826
| 0.100246
| 0
| 0.510638
| 0
| 0
| 0.030172
| 0.014199
| 0
| 0
| 0
| 0
| 0.170213
| 1
| 0.191489
| false
| 0
| 0.06383
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
335e28b1437b4dd4b0d397aeffa261dc037c5fc0
| 220
|
py
|
Python
|
Ex107/moeda.py
|
Fernando-Rodrigo/Exercicios
|
04fe641220f36df85a754b2944d60f245cf6cabd
|
[
"MIT"
] | 1
|
2022-03-14T20:49:04.000Z
|
2022-03-14T20:49:04.000Z
|
Ex107/moeda.py
|
Fernando-Rodrigo/Exercicios
|
04fe641220f36df85a754b2944d60f245cf6cabd
|
[
"MIT"
] | null | null | null |
Ex107/moeda.py
|
Fernando-Rodrigo/Exercicios
|
04fe641220f36df85a754b2944d60f245cf6cabd
|
[
"MIT"
] | null | null | null |
def aumentar(valor, taxa):
return valor + (valor * (taxa/100))
def diminuir(valor, taxa):
return valor - (valor * (taxa / 100))
def dobro(valor):
return valor * 2
def metade(valor):
return valor / 2
| 15.714286
| 41
| 0.622727
| 30
| 220
| 4.566667
| 0.333333
| 0.262774
| 0.218978
| 0.291971
| 0.510949
| 0.510949
| 0.510949
| 0.510949
| 0
| 0
| 0
| 0.047904
| 0.240909
| 220
| 14
| 42
| 15.714286
| 0.772455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
68828f8d2a7d1552c48e66ff95683e9643e6051c
| 38
|
py
|
Python
|
s/gen.py
|
marcusbuffett/uciengine
|
91f4d86f3c4f7c0bf19d083a7285e605462e2fa8
|
[
"MIT"
] | 6
|
2021-01-29T19:06:12.000Z
|
2022-01-30T20:15:41.000Z
|
s/gen.py
|
marcusbuffett/uciengine
|
91f4d86f3c4f7c0bf19d083a7285e605462e2fa8
|
[
"MIT"
] | null | null | null |
s/gen.py
|
marcusbuffett/uciengine
|
91f4d86f3c4f7c0bf19d083a7285e605462e2fa8
|
[
"MIT"
] | 2
|
2022-01-22T03:31:12.000Z
|
2022-01-30T20:04:40.000Z
|
print("nothing to be done for gen.py")
| 38
| 38
| 0.736842
| 8
| 38
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0.74359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6892b091b53146fe0c6dcc65f653f7cf649d6441
| 273
|
py
|
Python
|
kedro_to_dataiku/__init__.py
|
ppvastar/kedro_to_dataiku
|
570582340a85ef1094a7df350ab66d51c13d73e5
|
[
"MIT"
] | 2
|
2021-07-07T09:33:47.000Z
|
2021-07-17T18:19:42.000Z
|
kedro_to_dataiku/__init__.py
|
ppvastar/kedro_to_dataiku
|
570582340a85ef1094a7df350ab66d51c13d73e5
|
[
"MIT"
] | null | null | null |
kedro_to_dataiku/__init__.py
|
ppvastar/kedro_to_dataiku
|
570582340a85ef1094a7df350ab66d51c13d73e5
|
[
"MIT"
] | null | null | null |
from kedro_to_dataiku.kedro_to_dataiku import clone_from_git,copy_lib,return_env, get_node,run_node,act_on_project,change_dataset_format,create_datasets,load_input_datasets, create_recipes,create_zones,create_all,delete_all
from kedro_to_dataiku.version import __version__
| 91
| 223
| 0.912088
| 45
| 273
| 4.933333
| 0.644444
| 0.094595
| 0.189189
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03663
| 273
| 2
| 224
| 136.5
| 0.844106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d7ee22c91a87ce91c8715d0df8530711ce09d094
| 40,092
|
py
|
Python
|
tests/aat/spec/packet_generator_spec.py
|
loskutnikov-spirent/openperf
|
1f36ad31d6b8ce5d45c835e405ecc4e4b9793fd2
|
[
"Apache-2.0"
] | null | null | null |
tests/aat/spec/packet_generator_spec.py
|
loskutnikov-spirent/openperf
|
1f36ad31d6b8ce5d45c835e405ecc4e4b9793fd2
|
[
"Apache-2.0"
] | null | null | null |
tests/aat/spec/packet_generator_spec.py
|
loskutnikov-spirent/openperf
|
1f36ad31d6b8ce5d45c835e405ecc4e4b9793fd2
|
[
"Apache-2.0"
] | null | null | null |
from mamba import description, before, after
from expects import *
import os
import client.api
import client.models
from common import Config, Service
from common.helper import (make_traffic_template,
get_first_port_id,
default_traffic_packet_template_with_seq_modifiers,
default_traffic_packet_template_with_list_modifiers,
packet_generator_model,
packet_generator_models)
from common.matcher import (be_valid_packet_generator,
be_valid_packet_generator_result,
be_valid_transmit_flow,
raise_api_exception)
from common.helper import check_modules_exists
CONFIG = Config(os.path.join(os.path.dirname(__file__),
os.environ.get('MAMBA_CONFIG', 'config.yaml')))
CUSTOM_DATA = "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdCwg\
c2VkIGRvIGVpdXNtb2QgdGVtcG9yIGluY2lkaWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu\
YSBhbGlxdWEuIFV0IGVuaW0gYWQgbWluaW0gdmVuaWFtLCBxdWlzIG5vc3RydWQgZXhlcmNpdGF0\
aW9uIHVsbGFtY28gbGFib3JpcyBuaXNpIHV0IGFsaXF1aXAgZXggZWEgY29tbW9kbyBjb25zZXF1\
YXQuIER1aXMgYXV0ZSBpcnVyZSBkb2xvciBpbiByZXByZWhlbmRlcml0IGluIHZvbHVwdGF0ZSB2\
ZWxpdCBlc3NlIGNpbGx1bSBkb2xvcmUgZXUgZnVnaWF0IG51bGxhIHBhcmlhdHVyLiBFeGNlcHRl\
dXIgc2ludCBvY2NhZWNhdCBjdXBpZGF0YXQgbm9uIHByb2lkZW50LCBzdW50IGluIGN1bHBhIHF1\
aSBvZmZpY2lhIGRlc2VydW50IG1vbGxpdCBhbmltIGlkIGVzdCBsYWJvcnVtLgo="
CUSTOM_L2_PACKET = [
{'custom': {'data': CUSTOM_DATA,
'layer': 'ethernet'}}
]
CUSTOM_PAYLOAD = [
{'ethernet': {'source': '10:94:00:00:aa:bb',
'destination': '10:94:00:00:bb:cc'}},
{'ipv4': {'source': '198.18.15.10',
'destination': '198.18.15.20'}},
'udp',
{'custom': {'data': CUSTOM_DATA,
'layer': 'payload'}}
]
with description('Packet Generator,', 'packet_generator') as self:
with description('REST API'):
with before.all:
service = Service(CONFIG.service())
self.process = service.start()
self.api = client.api.PacketGeneratorsApi(service.client())
if not check_modules_exists(service.client(), 'packet-generator'):
self.skip()
with description('invalid HTTP methods,'):
with description('/packet/generators,'):
with it('returns 405'):
expect(lambda: self.api.api_client.call_api('/packet/generators', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "DELETE, GET, POST"}))
with description('/packet/generator-results,'):
with it('returns 405'):
expect(lambda: self.api.api_client.call_api('/packet/generator-results', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "DELETE, GET"}))
with description('/packet/tx-flows,'):
with it('returns 405'):
expect(lambda: self.api.api_client.call_api('/packet/tx-flows', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET"}))
with description('list generators,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
with description('unfiltered,'):
with it('succeeds'):
generators = self.api.list_packet_generators()
expect(generators).not_to(be_empty)
for gen in generators:
expect(gen).to(be_valid_packet_generator)
with description('filtered,'):
with description('by target_id,'):
with it('returns an generator'):
generators = self.api.list_packet_generators(target_id=self.generator.target_id)
expect(generators).not_to(be_empty)
for ana in generators:
expect(ana).to(be_valid_packet_generator)
expect([ a for a in generators if a.id == self.generator.id ]).not_to(be_empty)
with description('non-existent target_id,'):
with it('returns no generators'):
generators = self.api.list_packet_generators(target_id='foo')
expect(generators).to(be_empty)
with description('get generator,'):
with description('by existing generator id,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
with it('succeeds'):
expect(self.api.get_packet_generator(self.generator.id)).to(be_valid_packet_generator)
with description('non-existent generator,'):
with it('returns 404'):
expect(lambda: self.api.get_packet_generator('foo')).to(raise_api_exception(404))
with description('invalid generator id,'):
with it('returns 404'):
expect(lambda: self.api.get_packet_generator(':bar:')).to(raise_api_exception(404))
with description('create generator,'):
with description('valid config,'):
with description('without modifiers,'):
with it('succeeds'):
gen = packet_generator_model(self.api.api_client)
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with modifiers,'):
with description('with sequence modifiers'):
with it('succeeds'):
template = default_traffic_packet_template_with_seq_modifiers()
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with permuted sequence modifiers,'):
with it('succeeds'):
template = default_traffic_packet_template_with_seq_modifiers(permute_flag=True)
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with list modifiers'):
with it('succeeds'):
template = default_traffic_packet_template_with_list_modifiers()
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with permuted list modifiers,'):
with it('succeeds'):
template = default_traffic_packet_template_with_list_modifiers(permute_flag=True)
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with signatures enabled,'):
with it('succeeds'):
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].signature = client.models.SpirentSignature(
stream_id=1, latency='start_of_frame')
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with custom packet,'):
with it('succeeds'):
template = make_traffic_template(CUSTOM_L2_PACKET)
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('with custom payload,'):
with it('succeeds'):
template = make_traffic_template(CUSTOM_PAYLOAD)
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
result = self.api.create_packet_generator(gen)
expect(result).to(be_valid_packet_generator)
with description('invalid config'):
with description('empty target id,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.target_id = None
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('non-existent target id,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.target_id = 'foo'
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid ordering'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.order = 'foo'
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid load,'):
with description('invalid schema,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.load.rate = -1
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid rate,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.load.rate.value = -1
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid duration,'):
with description('empty duration object,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.duration = client.models.TrafficDuration()
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('negative frame count,'):
with it('returns 400'):
duration = client.models.TrafficDuration()
duration.frames = -1;
gen = packet_generator_model(self.api.api_client)
gen.config.duration = duration
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid time object,'):
with description('negative time,'):
with it('returns 400'):
time = client.models.TrafficDurationTime()
time.value = -1;
time.units = "seconds"
duration = client.models.TrafficDuration()
duration.time = time
gen = packet_generator_model(self.api.api_client)
gen.config.duration = duration
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('bogus units,'):
with it('returns 400'):
time = client.models.TrafficDurationTime()
time.value = 10;
time.units = "foobars"
duration = client.models.TrafficDuration()
duration.time = time
gen = packet_generator_model(self.api.api_client)
gen.config.duration = duration
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid traffic definition,'):
with description('no traffic definition,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.traffic = []
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid packet,'):
with description('invalid modifier tie,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet.modifier_tie = 'foo'
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid address,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet.protocols[0].ethernet.source = 'foo'
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid length,'):
with description('invalid fixed length,'):
with it('returns 400'):
length = client.models.TrafficLength()
length.fixed = 16
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].length = length
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid list length,'):
with it('returns 400'):
length = client.models.TrafficLength()
length.list = [128, 256, 512, 0]
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].length = length
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid sequence length,'):
with description('invalid count,'):
with it('returns 400'):
seq = client.models.TrafficLengthSequence()
seq.count = 0
seq.start = 128
length = client.models.TrafficLength()
length.sequence = seq
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].length = length
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid start,'):
with it('returns 400'):
seq = client.models.TrafficLengthSequence()
seq.count = 10
seq.start = 0
length = client.models.TrafficLength()
length.sequence = seq
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].length = length
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid weight,'):
with it('returns 400'):
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].weight = -1
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('invalid modifiers,'):
with description('too many flows,'):
with it('returns 400'):
# total flows = 65536^3 which exceeds our flow limit of (1 << 48) - 1
template = default_traffic_packet_template_with_seq_modifiers()
template.protocols[0].modifiers.items[0].mac.sequence.count = 65536
template.protocols[1].modifiers.items[0].ipv4.sequence.count = 65536
template.protocols[1].modifiers.items[1].ipv4.sequence.count = 65536
template.protocols[1].modifiers.tie = 'cartesian'
template.modifier_tie = 'cartesian'
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('too many signature flows,'):
with it('returns 400'):
# total flows = 256^2 which exceeds our signature flow limit of 64k - 1
template = default_traffic_packet_template_with_seq_modifiers()
template.protocols[1].modifiers.items[0].ipv4.sequence.count = 256
template.protocols[1].modifiers.items[1].ipv4.sequence.count = 256
template.protocols[1].modifiers.tie = 'cartesian'
gen = packet_generator_model(self.api.api_client)
gen.config.traffic[0].packet = template
gen.config.traffic[0].signature = client.models.SpirentSignature(
stream_id=1, latency='start_of_frame')
expect(lambda: self.api.create_packet_generator(gen)).to(raise_api_exception(400))
with description('delete generator,'):
with description('by existing generator id,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
with it('succeeds'):
self.api.delete_packet_generator(self.generator.id)
expect(self.api.list_packet_generators()).to(be_empty)
with description('non-existent generator id,'):
with it('succeeds'):
self.api.delete_packet_generator('foo')
with description('invalid generator id,'):
with it('returns 404'):
expect(lambda: self.api.delete_packet_generator("invalid_id")).to(raise_api_exception(404))
with description('start generator,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
with description('by existing generator id,'):
with it('succeeds'):
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
with description('non-existent generator id,'):
with it('returns 404'):
expect(lambda: self.api.start_packet_generator('foo')).to(raise_api_exception(404))
with description('stop running generator,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
with description('by generator id,'):
with it('succeeds'):
gen = self.api.get_packet_generator(self.generator.id)
expect(gen).to(be_valid_packet_generator)
expect(gen.active).to(be_true)
self.api.stop_packet_generator(self.generator.id)
gen = self.api.get_packet_generator(self.generator.id)
expect(gen).to(be_valid_packet_generator)
expect(gen.active).to(be_false)
results = self.api.list_packet_generator_results(generator_id=self.generator.id)
expect(results).not_to(be_empty)
for result in results:
expect(result).to(be_valid_packet_generator_result)
expect(result.active).to(be_false)
with description('restart generator, '):
with it('succeeds'):
self.api.stop_packet_generator(self.generator.id)
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
expect(result.active).to(be_true)
# We should now have two results: one active, one inactive
results = self.api.list_packet_generator_results(generator_id=self.generator.id)
expect(results).not_to(be_empty)
for result in results:
expect(result).to(be_valid_packet_generator_result)
expect([r for r in results if r.active is True]).not_to(be_empty)
expect([r for r in results if r.active is False]).not_to(be_empty)
with description('toggle generators,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
expect(result.active).to(be_true)
self.result = result
with description('two valid generators,'):
with it('succeeds'):
newgen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(newgen).to(be_valid_packet_generator)
expect(newgen.id).not_to(equal(self.generator.id))
toggle = client.models.TogglePacketGeneratorsRequest()
toggle.replace = self.generator.id
toggle._with = newgen.id
result1 = self.api.toggle_packet_generators(toggle)
expect(result1).to(be_valid_packet_generator_result)
expect(result1.active).to(be_true)
result2 = self.api.get_packet_generator_result(self.result.id)
expect(result2).to(be_valid_packet_generator_result)
expect(result2.active).to(be_false)
with description('non-existent generator,'):
with it('returns 400'):
toggle = client.models.TogglePacketGeneratorsRequest()
toggle.replace = self.generator.id
toggle._with = 'foo'
expect(lambda: self.api.toggle_packet_generators(toggle)).to(raise_api_exception(404))
with description('list generator results,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
with description('unfiltered,'):
with it('succeeds'):
results = self.api.list_packet_generator_results()
expect(results).not_to(be_empty)
for result in results:
expect(result).to(be_valid_packet_generator_result)
with description('by generator id,'):
with it('succeeds'):
results = self.api.list_packet_generator_results(generator_id=self.generator.id)
for result in results:
expect(result).to(be_valid_packet_generator_result)
expect([ r for r in results if r.generator_id == self.generator.id ]).not_to(be_empty)
with description('non-existent generator id,'):
with it('returns no results'):
results = self.api.list_packet_generator_results(generator_id='foo')
expect(results).to(be_empty)
with description('by target id,'):
with it('succeeds'):
results = self.api.list_packet_generator_results(target_id=get_first_port_id(self.api.api_client))
expect(results).not_to(be_empty)
with description('non-existent target id,'):
with it('returns no results'):
results = self.api.list_packet_generator_results(target_id='bar')
expect(results).to(be_empty)
with description('list tx flows,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
self.result = result
with description('unfiltered,'):
with it('succeeds'):
flows = self.api.list_tx_flows()
expect(flows).not_to(be_empty)
for flow in flows:
expect(flow).to(be_valid_transmit_flow)
with description('filtered,'):
with description('by target_id,'):
with it('returns tx flows'):
flows = self.api.list_tx_flows(target_id=self.generator.target_id)
expect(flows).not_to(be_empty)
for flow in flows:
expect(flow).to(be_valid_transmit_flow)
expect([f for f in flows if flow.generator_result_id == self.result.id]).not_to(be_empty)
with description('non-existent target_id,'):
with it('returns no flows'):
flows = self.api.list_packet_generators(target_id='foo')
expect(flows).to(be_empty)
with description('by generator_id,'):
with it('returns tx flows'):
flows = self.api.list_tx_flows(generator_id=self.generator.id)
expect(flows).not_to(be_empty)
for flow in flows:
expect(flow).to(be_valid_transmit_flow)
# Get generator result of tx flow
result = self.api.get_packet_generator_result(id=flow.generator_result_id)
expect(result).to(be_valid_packet_generator_result)
# Result generator id should match self generator id
expect(result.generator_id == self.generator.id).to(be_true)
with description('non-existent generator_id,'):
with it('returns no flows'):
flows = self.api.list_packet_generators(target_id='bar')
expect(flows).to(be_empty)
with description('get tx flow,'):
with before.each:
gen = self.api.create_packet_generator(packet_generator_model(self.api.api_client))
expect(gen).to(be_valid_packet_generator)
self.generator = gen
result = self.api.start_packet_generator(self.generator.id)
expect(result).to(be_valid_packet_generator_result)
self.result = result
with description('by flow id,'):
with it('returns tx flow'):
result = self.api.get_packet_generator_result(self.result.id)
for flow_id in result.flows:
flow = self.api.get_tx_flow(flow_id)
expect(flow).to(be_valid_transmit_flow)
with description('non-existent id,'):
with it('returns 404'):
expect(lambda: self.api.get_tx_flow('foo')).to(raise_api_exception(404))
with description('invalid generator id,'):
with it('returns 404'):
expect(lambda: self.api.get_packet_generator(':bar:')).to(raise_api_exception(404))
with description('bulk operations,'):
with description('bulk create,'):
with description('valid request,'):
with it('succeeds'):
request = client.models.BulkCreatePacketGeneratorsRequest()
request.items = packet_generator_models(self.api.api_client)
reply = self.api.bulk_create_packet_generators(request)
expect(reply.items).to(have_len(len(request.items)))
for item in reply.items:
expect(item).to(be_valid_packet_generator)
with description('invalid requests,'):
with it('returns 400 for invalid config'):
request = client.models.BulkCreatePacketGeneratorsRequest()
request.items = packet_generator_models(self.api.api_client)
request.items[-1].config.load.rate.value = -1
expect(lambda: self.api.bulk_create_packet_generators(request)).to(raise_api_exception(400))
expect(self.api.list_packet_generators()).to(be_empty)
with it('returns 404 for invalid id'):
request = client.models.BulkCreatePacketGeneratorsRequest()
request.items = packet_generator_models(self.api.api_client)
request.items[-1].id = ':foo'
expect(lambda: self.api.bulk_create_packet_generators(request)).to(raise_api_exception(404))
expect(self.api.list_packet_generators()).to(be_empty)
with description('bulk delete,'):
with before.each:
request = client.models.BulkCreatePacketGeneratorsRequest()
request.items = packet_generator_models(self.api.api_client)
reply = self.api.bulk_create_packet_generators(request)
expect(reply.items).to(have_len(len(request.items)))
for item in reply.items:
expect(item).to(be_valid_packet_generator)
with description('valid request,'):
with it('succeeds'):
self.api.bulk_delete_packet_generators(
client.models.BulkDeletePacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()]))
expect(self.api.list_packet_generators()).to(be_empty)
with description('invalid requests,'):
with it('succeeds with a non-existent id'):
self.api.bulk_delete_packet_generators(
client.models.BulkDeletePacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()] + ['foo']))
expect(self.api.list_packet_generators()).to(be_empty)
with it('returns 404 for an invalid id'):
expect(lambda: self.api.bulk_delete_packet_generators(
client.models.BulkDeletePacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()] + [':bar']))).to(
raise_api_exception(404))
expect(self.api.list_packet_generators()).not_to(be_empty)
with description('bulk start,'):
with before.each:
request = client.models.BulkCreatePacketGeneratorsRequest()
request.items = packet_generator_models(self.api.api_client)
reply = self.api.bulk_create_packet_generators(request)
expect(reply.items).to(have_len(len(request.items)))
for item in reply.items:
expect(item).to(be_valid_packet_generator)
with description('valid request,'):
with it('succeeds'):
reply = self.api.bulk_start_packet_generators(
client.models.BulkStartPacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()]))
expect(reply.items).to(have_len(len(self.api.list_packet_generators())))
for item in reply.items:
expect(item).to(be_valid_packet_generator_result)
expect(item.active).to(be_true)
with description('invalid requests,'):
with it('returns 404 for non-existent id'):
expect(lambda: self.api.bulk_start_packet_generators(
client.models.BulkStartPacketGeneratorsRequest(
[ana.id for ana in self.api.list_packet_generators()] + ['foo']))).to(
raise_api_exception(404))
for ana in self.api.list_packet_generators():
expect(ana.active).to(be_false)
with it('returns 404 for invalid id'):
expect(lambda: self.api.bulk_start_packet_generators(
client.models.BulkStartPacketGeneratorsRequest(
[ana.id for ana in self.api.list_packet_generators()] + [':bar']))).to(
raise_api_exception(404))
for ana in self.api.list_packet_generators():
expect(ana.active).to(be_false)
with description('bulk stop,'):
with before.each:
create_request = client.models.BulkCreatePacketGeneratorsRequest()
create_request.items = packet_generator_models(self.api.api_client)
create_reply = self.api.bulk_create_packet_generators(create_request)
expect(create_reply.items).to(have_len(len(create_request.items)))
for item in create_reply.items:
expect(item).to(be_valid_packet_generator)
start_reply = self.api.bulk_start_packet_generators(
client.models.BulkStartPacketGeneratorsRequest(
[gen.id for gen in create_reply.items]))
expect(start_reply.items).to(have_len(len(create_request.items)))
for item in start_reply.items:
expect(item).to(be_valid_packet_generator_result)
expect(item.active).to(be_true)
with description('valid request,'):
with it('succeeds'):
self.api.bulk_stop_packet_generators(
client.models.BulkStopPacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()]))
generators = self.api.list_packet_generators()
expect(generators).not_to(be_empty)
for gen in generators:
expect(gen.active).to(be_false)
with description('invalid requests,'):
with it('succeeds with a non-existent id'):
self.api.bulk_stop_packet_generators(
client.models.BulkStopPacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()] + ['foo']))
generators = self.api.list_packet_generators()
expect(generators).not_to(be_empty)
for gen in generators:
expect(gen.active).to(be_false)
with it('returns 404 for an invalid id'):
expect(lambda: self.api.bulk_stop_packet_generators(
client.models.BulkStopPacketGeneratorsRequest(
[gen.id for gen in self.api.list_packet_generators()] + [':bar']))).to(
raise_api_exception(404))
generators = self.api.list_packet_generators()
expect(generators).not_to(be_empty)
for gen in generators:
expect(gen.active).to(be_true)
with after.each:
try:
for gen in self.api.list_packet_generators():
if gen.active:
self.api.stop_packet_generator(gen.id)
self.api.delete_packet_generators()
except AttributeError:
pass
self.generator = None
self.result = None
with after.all:
try:
self.process.terminate()
self.process.wait()
except AttributeError:
pass
| 55.529086
| 118
| 0.539285
| 3,880
| 40,092
| 5.355155
| 0.059278
| 0.055925
| 0.02262
| 0.036192
| 0.837905
| 0.815767
| 0.796179
| 0.756762
| 0.72827
| 0.697035
| 0
| 0.016645
| 0.372144
| 40,092
| 721
| 119
| 55.606103
| 0.808795
| 0.006909
| 0
| 0.63252
| 0
| 0
| 0.078198
| 0.001281
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003252
| 0.014634
| 0
| 0.014634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cc0f7796f41a71fa6dbfa02172200375f866f32d
| 8,251
|
py
|
Python
|
RCDL_implementation/process_temporal_constant_feature.py
|
llin-csiss/RCDL
|
30dcb0e29329e3813296c42dca7f38c4136907ec
|
[
"MIT"
] | null | null | null |
RCDL_implementation/process_temporal_constant_feature.py
|
llin-csiss/RCDL
|
30dcb0e29329e3813296c42dca7f38c4136907ec
|
[
"MIT"
] | null | null | null |
RCDL_implementation/process_temporal_constant_feature.py
|
llin-csiss/RCDL
|
30dcb0e29329e3813296c42dca7f38c4136907ec
|
[
"MIT"
] | 1
|
2022-03-11T15:07:50.000Z
|
2022-03-11T15:07:50.000Z
|
# -*- coding: utf-8 -*-
"""
Generated by ArcGIS ModelBuilder on : 2021-10-14 11:27:01
"""
import arcpy
from sys import argv
def # NOT IMPLEMENTED# Function Body not implemented
def hist_appear_constant_feature(String="Z:\\nifa\\workdir\\process\\2017\\"): # hist_appear_constant_feature
# To allow overwriting outputs change overwriteOutput option to True.
arcpy.env.overwriteOutput = False
# Check out any necessary licenses.
arcpy.CheckOutExtension("spatial")
arcpy.CheckOutExtension("ImageAnalyst")
# Model Environment settings
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_String_hist = f"{String}hist"
Input_true_raster_or_constant_value = 1
Input_false_raster_or_constant_value = 0
for CDL_2009_31109_tif, Name in # NOT IMPLEMENTED(_String_hist, "", "", "NOT_RECURSIVE"):
# Process: Con (Con) (ia)
Name = "CDL_2016.tif"
_Name_111_1_tif = fr"{String}hist_const_freq\{Name}111_1.tif"
Con = _Name_111_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_111_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 111")
_Name_111_1_tif.save(Con)
# Process: Con (2) (Con) (ia)
_Name_083_1_tif = fr"{String}hist_const_freq\{Name}083_1.tif"
Con_2_ = _Name_083_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_083_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 83")
_Name_083_1_tif.save(Con_2_)
# Process: Con (3) (Con) (ia)
_Name_063_1_tif = fr"{String}hist_const_freq\{Name}063_1.tif"
Con_3_ = _Name_063_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_063_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 63")
_Name_063_1_tif.save(Con_3_)
# Process: Con (4) (Con) (ia)
_Name_141_1_tif = fr"{String}hist_const_freq\{Name}141_1.tif"
Con_4_ = _Name_141_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_141_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 141")
_Name_141_1_tif.save(Con_4_)
# Process: Con (5) (Con) (ia)
_Name_142_1_tif = fr"{String}hist_const_freq\{Name}142_1.tif"
Con_5_ = _Name_142_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_142_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 142")
_Name_142_1_tif.save(Con_5_)
# Process: Con (6) (Con) (ia)
_Name_143_1_tif = fr"{String}hist_const_freq\{Name}143_1.tif"
Con_6_ = _Name_143_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_143_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 143")
_Name_143_1_tif.save(Con_6_)
# Process: Con (7) (Con) (ia)
_Name_123_1_tif = fr"{String}hist_const_freq\{Name}123_1.tif"
Con_7_ = _Name_123_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_123_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 123")
_Name_123_1_tif.save(Con_7_)
# Process: Con (8) (Con) (ia)
_Name_122_1_tif = fr"{String}hist_const_freq\{Name}122_1.tif"
Con_8_ = _Name_122_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_122_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 122")
_Name_122_1_tif.save(Con_8_)
# Process: Con (9) (Con) (ia)
_Name_121_1_tif = fr"{String}hist_const_freq\{Name}121_1.tif"
Con_9_ = _Name_121_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_121_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 121")
_Name_121_1_tif.save(Con_9_)
# Process: Con (10) (Con) (ia)
_Name_082_1_tif = fr"{String}hist_const_freq\{Name}082_1.tif"
Con_10_ = _Name_082_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_082_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 82")
_Name_082_1_tif.save(Con_10_)
# Process: Con (11) (Con) (ia)
_Name_124_1_tif = fr"{String}hist_const_freq\{Name}124_1.tif"
Con_11_ = _Name_124_1_tif
with arcpy.EnvManager(scratchWorkspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb", workspace=r"C:\Users\linli_home\Documents\ArcGIS\Projects\nifa2\nifa2.gdb"):
_Name_124_1_tif = arcpy.ia.Con(in_conditional_raster=CDL_2009_31109_tif, in_true_raster_or_constant=Input_true_raster_or_constant_value, in_false_raster_or_constant=Input_false_raster_or_constant_value, where_clause="VALUE = 124")
_Name_124_1_tif.save(Con_11_)
if __name__ == '__main__':
hist_appear_constant_feature(*argv[1:])
| 70.521368
| 247
| 0.722215
| 1,216
| 8,251
| 4.429276
| 0.095395
| 0.040847
| 0.136651
| 0.053472
| 0.763832
| 0.763832
| 0.752507
| 0.752507
| 0.693279
| 0.693279
| 0
| 0.068874
| 0.176463
| 8,251
| 116
| 248
| 71.12931
| 0.723767
| 0.071628
| 0
| 0.171429
| 1
| 0
| 0.281003
| 0.258346
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.028571
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04247ad6dd616c18ff804da74885789933c0e157
| 32
|
py
|
Python
|
qulab/drivers/PG_AWG/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
qulab/drivers/PG_AWG/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
qulab/drivers/PG_AWG/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
from .AWG_Driver import Driver
| 16
| 31
| 0.8125
| 5
| 32
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0440f8fb39e1b5c1e0da71652dfb17e9cab04dcb
| 17,667
|
py
|
Python
|
irctest/server_tests/test_labeled_responses.py
|
delthas/irctest
|
c12c44b9938986608a8114cc21f1b5719cd110cb
|
[
"MIT"
] | 8
|
2017-11-01T17:43:13.000Z
|
2022-01-30T08:21:50.000Z
|
irctest/server_tests/test_labeled_responses.py
|
delthas/irctest
|
c12c44b9938986608a8114cc21f1b5719cd110cb
|
[
"MIT"
] | 32
|
2016-12-01T09:23:58.000Z
|
2020-09-23T05:48:01.000Z
|
irctest/server_tests/test_labeled_responses.py
|
delthas/irctest
|
c12c44b9938986608a8114cc21f1b5719cd110cb
|
[
"MIT"
] | 3
|
2017-11-14T03:54:39.000Z
|
2020-09-09T06:47:57.000Z
|
"""
<https://ircv3.net/specs/extensions/labeled-response.html>
"""
import re
from irctest import cases
class LabeledResponsesTestCase(cases.BaseServerTestCase, cases.OptionalityHelper):
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledPrivmsgResponsesToMultipleClients(self):
    """Labeled PRIVMSG to three targets: no recipient may see the label,
    and the sender's echoes come back wrapped in a BATCH.

    Fix: the fail messages for the second and third targets previously
    said "target 1" (copy-paste); they now identify the right target.
    """
    self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
    self.getMessages(1)
    self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
    self.getMessages(2)
    self.connectClient('carl', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
    self.getMessages(3)
    self.connectClient('alice', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
    self.getMessages(4)
    self.sendLine(1, '@label=12345 PRIVMSG bar,carl,alice :hi')
    m = self.getMessage(1)
    m2 = self.getMessage(2)
    m3 = self.getMessage(3)
    m4 = self.getMessage(4)
    # ensure the label isn't sent to recipients
    self.assertMessageEqual(m2, command='PRIVMSG', fail_msg='No PRIVMSG received by target 1 after sending one out')
    self.assertNotIn('label', m2.tags, m2, fail_msg="When sending a PRIVMSG with a label, the target users shouldn't receive the label (only the sending user should): {msg}")
    self.assertMessageEqual(m3, command='PRIVMSG', fail_msg='No PRIVMSG received by target 2 after sending one out')
    self.assertNotIn('label', m3.tags, m3, fail_msg="When sending a PRIVMSG with a label, the target users shouldn't receive the label (only the sending user should): {msg}")
    self.assertMessageEqual(m4, command='PRIVMSG', fail_msg='No PRIVMSG received by target 3 after sending one out')
    self.assertNotIn('label', m4.tags, m4, fail_msg="When sending a PRIVMSG with a label, the target users shouldn't receive the label (only the sending user should): {msg}")
    # multi-target send produces multiple echoes, so the sender sees a BATCH
    self.assertMessageEqual(m, command='BATCH', fail_msg='No BATCH echo received after sending one out')
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledPrivmsgResponsesToClient(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(2)
self.sendLine(1, '@label=12345 PRIVMSG bar :hi')
m = self.getMessage(1)
m2 = self.getMessage(2)
# ensure the label isn't sent to recipient
self.assertMessageEqual(m2, command='PRIVMSG', fail_msg='No PRIVMSG received by the target after sending one out')
self.assertNotIn('label', m2.tags, m2, fail_msg="When sending a PRIVMSG with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
self.assertMessageEqual(m, command='PRIVMSG', fail_msg='No PRIVMSG echo received after sending one out')
self.assertIn('label', m.tags, m, fail_msg="When sending a PRIVMSG with a label, the echo'd message didn't contain the label at all: {msg}")
self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd PRIVMSG to a client did not contain the same label we sent it with(should be '12345'): {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledPrivmsgResponsesToChannel(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(2)
# join channels
self.sendLine(1, 'JOIN #test')
self.getMessages(1)
self.sendLine(2, 'JOIN #test')
self.getMessages(2)
self.getMessages(1)
self.sendLine(1, '@label=12345;+draft/reply=123;+draft/react=l😃l PRIVMSG #test :hi')
ms = self.getMessage(1)
mt = self.getMessage(2)
# ensure the label isn't sent to recipient
self.assertMessageEqual(mt, command='PRIVMSG', fail_msg='No PRIVMSG received by the target after sending one out')
self.assertNotIn('label', mt.tags, mt, fail_msg="When sending a PRIVMSG with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
# ensure sender correctly receives msg
self.assertMessageEqual(ms, command='PRIVMSG', fail_msg="Got a message back that wasn't a PRIVMSG")
self.assertIn('label', ms.tags, ms, fail_msg="When sending a PRIVMSG with a label, the source user should receive the label but didn't: {msg}")
self.assertEqual(ms.tags['label'], '12345', ms, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledPrivmsgResponsesToSelf(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.sendLine(1, '@label=12345 PRIVMSG foo :hi')
m1 = self.getMessage(1)
m2 = self.getMessage(1)
number_of_labels = 0
for m in [m1, m2]:
self.assertMessageEqual(m, command='PRIVMSG', fail_msg="Got a message back that wasn't a PRIVMSG")
if 'label' in m.tags:
number_of_labels += 1
self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
self.assertEqual(number_of_labels, 1, m1, fail_msg="When sending a PRIVMSG to self with echo-message, we only expect one message to contain the label. Instead, {} messages had the label".format(number_of_labels))
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledNoticeResponsesToClient(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(2)
self.sendLine(1, '@label=12345 NOTICE bar :hi')
m = self.getMessage(1)
m2 = self.getMessage(2)
# ensure the label isn't sent to recipient
self.assertMessageEqual(m2, command='NOTICE', fail_msg='No NOTICE received by the target after sending one out')
self.assertNotIn('label', m2.tags, m2, fail_msg="When sending a NOTICE with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
self.assertMessageEqual(m, command='NOTICE', fail_msg='No NOTICE echo received after sending one out')
self.assertIn('label', m.tags, m, fail_msg="When sending a NOTICE with a label, the echo'd message didn't contain the label at all: {msg}")
self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd NOTICE to a client did not contain the same label we sent it with(should be '12345'): {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledNoticeResponsesToChannel(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(2)
# join channels
self.sendLine(1, 'JOIN #test')
self.getMessages(1)
self.sendLine(2, 'JOIN #test')
self.getMessages(2)
self.getMessages(1)
self.sendLine(1, '@label=12345;+draft/reply=123;+draft/react=l😃l NOTICE #test :hi')
ms = self.getMessage(1)
mt = self.getMessage(2)
# ensure the label isn't sent to recipient
self.assertMessageEqual(mt, command='NOTICE', fail_msg='No NOTICE received by the target after sending one out')
self.assertNotIn('label', mt.tags, mt, fail_msg="When sending a NOTICE with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
# ensure sender correctly receives msg
self.assertMessageEqual(ms, command='NOTICE', fail_msg="Got a message back that wasn't a NOTICE")
self.assertIn('label', ms.tags, ms, fail_msg="When sending a NOTICE with a label, the source user should receive the label but didn't: {msg}")
self.assertEqual(ms.tags['label'], '12345', ms, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledNoticeResponsesToSelf(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response'], skip_if_cap_nak=True)
self.getMessages(1)
self.sendLine(1, '@label=12345 NOTICE foo :hi')
m1 = self.getMessage(1)
m2 = self.getMessage(1)
number_of_labels = 0
for m in [m1, m2]:
self.assertMessageEqual(m, command='NOTICE', fail_msg="Got a message back that wasn't a NOTICE")
if 'label' in m.tags:
number_of_labels += 1
self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
self.assertEqual(number_of_labels, 1, m1, fail_msg="When sending a NOTICE to self with echo-message, we only expect one message to contain the label. Instead, {} messages had the label".format(number_of_labels))
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledTagMsgResponsesToClient(self):
    """Labeled TAGMSG to another client: the target gets the client-only
    tags but not the label; the sender's echo carries the label and the
    same client-only tags.

    Fix: the two "source user's TAGMSG" tag-value assertions previously
    inspected m2 (the target's copy) while their fail messages claimed to
    check the source echo m; they now assert on m.
    """
    self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response', 'message-tags'], skip_if_cap_nak=True)
    self.getMessages(1)
    self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response', 'message-tags'], skip_if_cap_nak=True)
    self.getMessages(2)
    self.sendLine(1, '@label=12345;+draft/reply=123;+draft/react=l😃l TAGMSG bar')
    m = self.getMessage(1)
    m2 = self.getMessage(2)
    # ensure the label isn't sent to recipient
    self.assertMessageEqual(m2, command='TAGMSG', fail_msg='No TAGMSG received by the target after sending one out')
    self.assertNotIn('label', m2.tags, m2, fail_msg="When sending a TAGMSG with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
    self.assertIn('+draft/reply', m2.tags, m2, fail_msg="Reply tag wasn't present on the target user's TAGMSG: {msg}")
    self.assertEqual(m2.tags['+draft/reply'], '123', m2, fail_msg="Reply tag wasn't the same on the target user's TAGMSG: {msg}")
    self.assertIn('+draft/react', m2.tags, m2, fail_msg="React tag wasn't present on the target user's TAGMSG: {msg}")
    self.assertEqual(m2.tags['+draft/react'], 'l😃l', m2, fail_msg="React tag wasn't the same on the target user's TAGMSG: {msg}")
    # the sender's echo must carry the label and the same client-only tags
    self.assertMessageEqual(m, command='TAGMSG', fail_msg='No TAGMSG echo received after sending one out')
    self.assertIn('label', m.tags, m, fail_msg="When sending a TAGMSG with a label, the echo'd message didn't contain the label at all: {msg}")
    self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd TAGMSG to a client did not contain the same label we sent it with(should be '12345'): {msg}")
    self.assertIn('+draft/reply', m.tags, m, fail_msg="Reply tag wasn't present on the source user's TAGMSG: {msg}")
    self.assertEqual(m.tags['+draft/reply'], '123', m, fail_msg="Reply tag wasn't the same on the source user's TAGMSG: {msg}")
    self.assertIn('+draft/react', m.tags, m, fail_msg="React tag wasn't present on the source user's TAGMSG: {msg}")
    self.assertEqual(m.tags['+draft/react'], 'l😃l', m, fail_msg="React tag wasn't the same on the source user's TAGMSG: {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledTagMsgResponsesToChannel(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response', 'message-tags'], skip_if_cap_nak=True)
self.getMessages(1)
self.connectClient('bar', capabilities=['batch', 'echo-message', 'labeled-response', 'message-tags'], skip_if_cap_nak=True)
self.getMessages(2)
# join channels
self.sendLine(1, 'JOIN #test')
self.getMessages(1)
self.sendLine(2, 'JOIN #test')
self.getMessages(2)
self.getMessages(1)
self.sendLine(1, '@label=12345;+draft/reply=123;+draft/react=l😃l TAGMSG #test')
ms = self.getMessage(1)
mt = self.getMessage(2)
# ensure the label isn't sent to recipient
self.assertMessageEqual(mt, command='TAGMSG', fail_msg='No TAGMSG received by the target after sending one out')
self.assertNotIn('label', mt.tags, mt, fail_msg="When sending a TAGMSG with a label, the target user shouldn't receive the label (only the sending user should): {msg}")
# ensure sender correctly receives msg
self.assertMessageEqual(ms, command='TAGMSG', fail_msg="Got a message back that wasn't a TAGMSG")
self.assertIn('label', ms.tags, ms, fail_msg="When sending a TAGMSG with a label, the source user should receive the label but didn't: {msg}")
self.assertEqual(ms.tags['label'], '12345', ms, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testLabeledTagMsgResponsesToSelf(self):
self.connectClient('foo', capabilities=['batch', 'echo-message', 'labeled-response', 'message-tags'], skip_if_cap_nak=True)
self.getMessages(1)
self.sendLine(1, '@label=12345;+draft/reply=123;+draft/react=l😃l TAGMSG foo')
m1 = self.getMessage(1)
m2 = self.getMessage(1)
number_of_labels = 0
for m in [m1, m2]:
self.assertMessageEqual(m, command='TAGMSG', fail_msg="Got a message back that wasn't a TAGMSG")
if 'label' in m.tags:
number_of_labels += 1
self.assertEqual(m.tags['label'], '12345', m, fail_msg="Echo'd label doesn't match the label we sent (should be '12345'): {msg}")
self.assertEqual(number_of_labels, 1, m1, fail_msg="When sending a TAGMSG to self with echo-message, we only expect one message to contain the label. Instead, {} messages had the label".format(number_of_labels))
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
def testBatchedJoinMessages(self):
self.connectClient('bar', capabilities=['batch', 'labeled-response', 'message-tags', 'server-time'], skip_if_cap_nak=True)
self.getMessages(1)
self.sendLine(1, '@label=12345 JOIN #xyz')
m = self.getMessages(1)
# we expect at least join and names lines, which must be batched
self.assertGreaterEqual(len(m), 3)
# valid BATCH start line:
batch_start = m[0]
self.assertMessageEqual(batch_start, command='BATCH')
self.assertEqual(len(batch_start.params), 2)
self.assertTrue(batch_start.params[0].startswith('+'), 'batch start param must begin with +, got %s' % (batch_start.params[0],))
batch_id = batch_start.params[0][1:]
# batch id MUST be alphanumerics and hyphens
self.assertTrue(re.match(r'^[A-Za-z0-9\-]+$', batch_id) is not None, 'batch id must be alphanumerics and hyphens, got %r' % (batch_id,))
self.assertEqual(batch_start.params[1], 'labeled-response')
self.assertEqual(batch_start.tags.get('label'), '12345')
# valid BATCH end line
batch_end = m[-1]
self.assertMessageEqual(batch_end, command='BATCH', params=['-' + batch_id])
# messages must have the BATCH tag
for message in m[1:-1]:
self.assertEqual(message.tags.get('batch'), batch_id)
@cases.SpecificationSelector.requiredBySpecification('Oragono')
def testNoBatchForSingleMessage(self):
    """A labeled command whose response is a single line must be labeled
    directly, with no BATCH wrapper around it."""
    self.connectClient('bar', capabilities=['batch', 'labeled-response', 'message-tags', 'server-time'])
    self.getMessages(1)
    self.sendLine(1, '@label=98765 PING adhoctestline')
    # one-line responses are labeled in place rather than opening a BATCH
    responses = self.getMessages(1)
    self.assertEqual(len(responses), 1)
    reply = responses[0]
    self.assertMessageEqual(reply, command='PONG', params=['adhoctestline'])
    # the reply must carry back the label we sent
    self.assertEqual(reply.tags.get('label'), '98765')
@cases.SpecificationSelector.requiredBySpecification('Oragono')
def testEmptyBatchForNoResponse(self):
self.connectClient('bar', capabilities=['batch', 'labeled-response', 'message-tags', 'server-time'])
self.getMessages(1)
# PONG never receives a response
self.sendLine(1, '@label=98765 PONG adhoctestline')
# labeled-response: "Servers MUST respond with a labeled
# `ACK` message when a client sends a labeled command that normally
# produces no response."
ms = self.getMessages(1)
self.assertEqual(len(ms), 1)
ack = ms[0]
self.assertEqual(ack.command, 'ACK')
self.assertEqual(ack.tags.get('label'), '98765')
| 59.285235
| 220
| 0.677534
| 2,408
| 17,667
| 4.910299
| 0.08098
| 0.031969
| 0.02977
| 0.020298
| 0.847767
| 0.830176
| 0.8125
| 0.787974
| 0.784591
| 0.770974
| 0
| 0.025783
| 0.194317
| 17,667
| 297
| 221
| 59.484848
| 0.804412
| 0.054056
| 0
| 0.511737
| 0
| 0.140845
| 0.377196
| 0.013788
| 0
| 0
| 0
| 0
| 0.323944
| 1
| 0.061033
| false
| 0
| 0.00939
| 0
| 0.075117
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0984cf2306243d61a4b1adc668dc6698bcb59af
| 33
|
py
|
Python
|
deeplodocus/app/transforms/__init__.py
|
samuelwestlake/deeplodocus-dev
|
12b283ca4eb39abf13ddc56eabc78e01e90627ff
|
[
"MIT"
] | 2
|
2019-09-13T12:02:23.000Z
|
2022-03-11T13:46:35.000Z
|
deeplodocus/app/transforms/__init__.py
|
samuelwestlake/deeplodocus-dev
|
12b283ca4eb39abf13ddc56eabc78e01e90627ff
|
[
"MIT"
] | 11
|
2018-11-23T14:01:17.000Z
|
2019-09-16T15:25:07.000Z
|
deeplodocus/app/transforms/__init__.py
|
samuelwestlake/deeplodocus-dev
|
12b283ca4eb39abf13ddc56eabc78e01e90627ff
|
[
"MIT"
] | 4
|
2018-09-22T13:31:08.000Z
|
2018-12-05T18:34:46.000Z
|
def empty(x):
    """Pass-through transform: return the input unchanged, paired with
    ``None`` (no auxiliary output)."""
    untouched = x
    return untouched, None
| 11
| 18
| 0.606061
| 6
| 33
| 3.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 33
| 2
| 19
| 16.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f0a24e813bad46f30d1b3901a79af47f7f83005d
| 45
|
wsgi
|
Python
|
testxsendfile.wsgi
|
jhpyle/testxsendfile
|
8536770293b4e6cce545814b6e6804bc88342c68
|
[
"MIT"
] | null | null | null |
testxsendfile.wsgi
|
jhpyle/testxsendfile
|
8536770293b4e6cce545814b6e6804bc88342c68
|
[
"MIT"
] | null | null | null |
testxsendfile.wsgi
|
jhpyle/testxsendfile
|
8536770293b4e6cce545814b6e6804bc88342c68
|
[
"MIT"
] | null | null | null |
from testxsendfile import app as application
| 22.5
| 44
| 0.866667
| 6
| 45
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 1
| 45
| 45
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0c2d2e3bd49a535b9269cf12786ef9176753b94
| 6,445
|
py
|
Python
|
land_china/land_china/spiders/exprs.py
|
pythonyhd/finace
|
614d98ad92e1bbaa6cf7dc1d6dfaba4f24431688
|
[
"Apache-2.0"
] | 1
|
2020-08-18T01:55:14.000Z
|
2020-08-18T01:55:14.000Z
|
land_china/land_china/spiders/exprs.py
|
pythonyhd/finace
|
614d98ad92e1bbaa6cf7dc1d6dfaba4f24431688
|
[
"Apache-2.0"
] | null | null | null |
land_china/land_china/spiders/exprs.py
|
pythonyhd/finace
|
614d98ad92e1bbaa6cf7dc1d6dfaba4f24431688
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Field-extraction table for land-parcel detail pages. Each entry maps one
# on-page field to an output key:
#   "name" - the visible field label on the page (informational)
#   "key"  - the key the scraped value is stored under
#   "expr" - candidate XPath expressions, presumably tried in order: first a
#            lookup anchored on the visible label text, then a fallback on a
#            fixed ctrl-span id (page template variant) — TODO confirm the
#            consumer's try-in-order semantics.
xpath_list = [
{
"name": "行政区:",
'key': "region",
"expr": ["//div[@id='p1']//td/span[contains(text(),'行政区:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r1_c2_ctrl']/text()"]
},
{
"name": "电子监管号:",
"key": "supervise_number",
"expr": ["//div[@id='p1']//td/span[contains(text(),'电子监管号:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r1_c4_ctrl']/text()"]
},
{
# NOTE(review): "name" lacks the trailing fullwidth colon the sibling
# entries include; harmless if "name" is informational only, but inconsistent.
"name": "项目名称",
"key": "project_name",
"expr": ["//div[@id='p1']//td/span[contains(text(),'项目名称:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r17_c2_ctrl']/text()"],
},
{
"name": "项目位置:",
"key": "project_location",
"expr": ["//div[@id='p1']//td/span[contains(text(),'项目位置:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r16_c2_ctrl']/text()"],
},
{
"name": "面积(公顷):",
"key": "acreage",
"expr": ["//div[@id='p1']//td/span[contains(text(),'面积(公顷):')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r2_c2_ctrl']/text()"]
},
{
# NOTE(review): the fallback span id below (p1_f1_r2_c2) is identical to the
# acreage entry's fallback above — looks like a copy-paste slip; verify
# against the actual page before relying on this field.
"name": "土地来源:",
"key": "source",
"expr": ["//div[@id='p1']//td/span[contains(text(),'土地来源:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r2_c2_ctrl']/text()"],
},
{
"name": "土地用途:",
"key": "purpose",
"expr": ["//div[@id='p1']//td/span[contains(text(),'土地用途:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r3_c2_ctrl']/text()"]
},
{
"name": "供地方式:",
"key": "supply",
"expr": ["//div[@id='p1']//td/span[contains(text(),'供地方式:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r3_c4_ctrl']/text()"]
},
{
"name": "土地使用年限:",
"key": "soil_life",
"expr": ["//div[@id='p1']//td/span[contains(text(),'土地使用年限:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r19_c2_ctrl']/text()"]
},
{
"name": "行业分类:",
"key": "classification",
"expr": ["//div[@id='p1']//td/span[contains(text(),'行业分类:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r19_c4_ctrl']/text()"]
},
{
"name": "土地级别:",
"key": "soil_level",
"expr": ["//div[@id='p1']//td/span[contains(text(),'土地级别:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r20_c2_ctrl']/text()"]
},
{
"name": "成交价格(万元):",
"key": "price",
"expr": [
"//div[@id='p1']//td/span[contains(text(),'成交价格(万元):')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r20_c4_ctrl']/text()"]
},
{
"name": "土地使用权人:",
"key": "land_usage_right",
"expr": ["//div[@id='p1']//td/span[contains(text(),'土地使用权人:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r23_c2_ctrl']/text()"]
},
{
"name": "下限:",
"key": "lower_limit",
"expr": ["//div[@id='p1']//td/span[contains(text(),'下限:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f2_r1_c2_ctrl']/text()"]
},
{
"name": "上限:",
"key": "upper_limit",
"expr": ["//div[@id='p1']//td/span[contains(text(),'上限:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f2_r1_c4_ctrl']/text()"]
},
{
"name": "约定交地时间:",
"key": "appointed_deal_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'约定交地时间:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r21_c4_ctrl']/text()"]
},
{
"name": "约定开工时间:",
"key": "appointed_work_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'约定开工时间:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r22_c2_ctrl']/text()"]
},
{
"name": "约定竣工时间:",
"key": "appointed_achieve_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'约定竣工时间:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r22_c4_ctrl']/text()"]
},
{
"name": "实际开工时间:",
"key": "reality_work_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'实际开工时间:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r10_c2_ctrl']/text()"]
},
{
# NOTE(review): fallback span id r22_c4 duplicates appointed_achieve_date's
# fallback above — the actual completion date presumably lives in a
# different ctrl span; confirm against the page.
"name": "实际竣工时间:",
"key": "reality_achieve_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'实际竣工时间:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r22_c4_ctrl']/text()"]
},
{
"name": "批准单位:",
"key": "approved",
"expr": ["//div[@id='p1']//td/span[contains(text(),'批准单位:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r14_c2_ctrl']/text()"]
},
{
"name": "合同签订日期:",
"key": "contract_date",
"expr": ["//div[@id='p1']//td/span[contains(text(),'合同签订日期:')]/parent::td/following-sibling::td[1]/span/text()",
"//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r14_c4_ctrl']/text()"]
},
]
| 46.366906
| 120
| 0.553297
| 777
| 6,445
| 4.307593
| 0.118404
| 0.046011
| 0.059157
| 0.072304
| 0.79265
| 0.775919
| 0.775919
| 0.775919
| 0.654616
| 0.61458
| 0
| 0.079806
| 0.200931
| 6,445
| 138
| 121
| 46.702899
| 0.570097
| 0.003258
| 0
| 0.014815
| 0
| 0.162963
| 0.710838
| 0.614762
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0d2ae5c24cffe0c5aba312a6ac29eda25fb3977
| 193
|
py
|
Python
|
restic/__init__.py
|
jstzwj/PyRestic
|
4164e34da3a8333ea655a70cf3201a4141c67b33
|
[
"MIT"
] | 2
|
2019-12-26T07:52:56.000Z
|
2020-01-03T04:40:06.000Z
|
restic/__init__.py
|
jstzwj/PyRestic
|
4164e34da3a8333ea655a70cf3201a4141c67b33
|
[
"MIT"
] | null | null | null |
restic/__init__.py
|
jstzwj/PyRestic
|
4164e34da3a8333ea655a70cf3201a4141c67b33
|
[
"MIT"
] | 1
|
2021-03-13T22:39:11.000Z
|
2021-03-13T22:39:11.000Z
|
from restic.repo import Repo
from restic.snapshot import Snapshot
from restic.core import version, self_update, generate
from restic.config import restic_bin
from restic.test import test_all
| 24.125
| 54
| 0.839378
| 30
| 193
| 5.3
| 0.466667
| 0.314465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124352
| 193
| 7
| 55
| 27.571429
| 0.940828
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b11147bed1b0bbf88a68bf284cc8e910fa4b8a1
| 156
|
py
|
Python
|
app/audio/__init__.py
|
elmaghallawy/ManAudio-API
|
4945530081f12a90e4e431f0a60bafaa33430f5d
|
[
"MIT"
] | null | null | null |
app/audio/__init__.py
|
elmaghallawy/ManAudio-API
|
4945530081f12a90e4e431f0a60bafaa33430f5d
|
[
"MIT"
] | null | null | null |
app/audio/__init__.py
|
elmaghallawy/ManAudio-API
|
4945530081f12a90e4e431f0a60bafaa33430f5d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
audio = Blueprint('audio', __name__)
# we import audio views here to avoid circular dependency issues
from . import views
| 15.6
| 64
| 0.762821
| 21
| 156
| 5.47619
| 0.666667
| 0.243478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185897
| 156
| 9
| 65
| 17.333333
| 0.905512
| 0.397436
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0b1da46ffb3109bab23407ae385b9b03d65e1293
| 25
|
py
|
Python
|
catkin_ws/install/lib/python2.7/dist-packages/rftest/msg/__init__.py
|
ggrabuskie/ros
|
8124ad3c6e6bc76977bef154c3cedd0a251409d0
|
[
"MIT"
] | null | null | null |
catkin_ws/install/lib/python2.7/dist-packages/rftest/msg/__init__.py
|
ggrabuskie/ros
|
8124ad3c6e6bc76977bef154c3cedd0a251409d0
|
[
"MIT"
] | null | null | null |
catkin_ws/install/lib/python2.7/dist-packages/rftest/msg/__init__.py
|
ggrabuskie/ros
|
8124ad3c6e6bc76977bef154c3cedd0a251409d0
|
[
"MIT"
] | null | null | null |
from ._Mobility import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bfbc3e8b6884c5d49b97b4d7b0157a58375e570
| 1,721
|
py
|
Python
|
tests/unit/saltenv/cli/test_unit_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | 5
|
2022-03-25T17:15:04.000Z
|
2022-03-28T23:24:26.000Z
|
tests/unit/saltenv/cli/test_unit_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/saltenv/cli/test_unit_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | 2
|
2022-03-26T06:33:30.000Z
|
2022-03-29T19:43:50.000Z
|
from pathlib import Path
async def test_unit_version_exists(mock_hub, hub, capfd, tmp_path):
    """Scenario #1: a current version is set, so the CLI prints it."""
    # Wire the real CLI handler into the mock hub.
    mock_hub.saltenv.cli.version = hub.saltenv.cli.version
    # Pretend version "3001" is currently selected.
    current = ("3001", Path(tmp_path) / "3001")
    mock_hub.saltenv.ops.get_current_version.return_value = current
    # Run the CLI command under test.
    await mock_hub.saltenv.cli.version()
    # The version and the path it was set by should have been printed.
    captured_out, _ = capfd.readouterr()
    assert captured_out == f"{current[0]} (set by {current[1]})\n"
    # The lookup helper must have been consulted exactly once, with no args.
    mock_hub.saltenv.ops.get_current_version.assert_called_once_with()
async def test_unit_version_nonexistent(mock_hub, hub, capfd):
    """Scenario #2: no current version is set, so an error is printed."""
    # Wire the real CLI handler into the mock hub.
    mock_hub.saltenv.cli.version = hub.saltenv.cli.version
    # Pretend no version is currently selected (empty tuple values).
    mock_hub.saltenv.ops.get_current_version.return_value = ("", "")
    # Run the CLI command under test.
    await mock_hub.saltenv.cli.version()
    # An error message about the missing version should have been printed.
    captured_out, _ = capfd.readouterr()
    assert captured_out == "ERROR: No version of Salt is set!\n"
    # The lookup helper must have been consulted exactly once, with no args.
    mock_hub.saltenv.ops.get_current_version.assert_called_once_with()
| 34.42
| 81
| 0.732132
| 248
| 1,721
| 4.83871
| 0.266129
| 0.07
| 0.093333
| 0.1
| 0.84
| 0.801667
| 0.801667
| 0.801667
| 0.801667
| 0.801667
| 0
| 0.008584
| 0.187682
| 1,721
| 49
| 82
| 35.122449
| 0.849785
| 0.260895
| 0
| 0.631579
| 0
| 0
| 0.087225
| 0.04141
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5012313fc3c7592fdf103225c2a98d6ac307a6aa
| 5,498
|
py
|
Python
|
tests/test_okr_sample.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | 7
|
2021-08-18T00:42:05.000Z
|
2022-03-14T09:49:15.000Z
|
tests/test_okr_sample.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | null | null | null |
tests/test_okr_sample.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | 1
|
2022-03-14T09:49:20.000Z
|
2022-03-14T09:49:20.000Z
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
    """Stand-in callable that always fails with a tagged PyLarkError."""
    # Accept any call signature; callers assert on the "mock-failed" message.
    failure = pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")
    raise failure
def mock_raw_request(*args, **kwargs):
    """Stand-in for the client's raw_request that always fails."""
    # Accept any call signature; callers assert on the distinctive message.
    failure = pylark.PyLarkError(
        scope="scope", func="func", code=1, msg="mock-raw-request-failed"
    )
    raise failure
# mock get token
class TestOKRSampleMockGetTokenFailed(unittest.TestCase):
    """Every OKR call must surface the error raised while fetching a token."""
    def __init__(self, *args, **kwargs):
        super(TestOKRSampleMockGetTokenFailed, self).__init__(*args, **kwargs)
        # Fully-permissioned client, but both token getters are stubbed to fail.
        self.cli = app_all_permission.ins()
        self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
        self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
        self.module_cli = self.cli.okr
    def test_mock_get_token_get_okr_period_list(self):
        """The token failure propagates out of get_okr_period_list."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_okr_period_list(pylark.GetOKRPeriodListReq())
        assert "msg=failed" in f"{e}"
    def test_mock_get_token_batch_get_okr(self):
        """The token failure propagates out of batch_get_okr."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.batch_get_okr(pylark.BatchGetOKRReq())
        assert "msg=failed" in f"{e}"
    def test_mock_get_token_get_user_okr_list(self):
        """The token failure propagates out of get_user_okr_list."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_user_okr_list(pylark.GetUserOKRListReq())
        assert "msg=failed" in f"{e}"
# mock mock self func
class TestOKRSampleMockSelfFuncFailed(unittest.TestCase):
    """Each OKR method, replaced by the failing `mock`, raises as expected."""
    def __init__(self, *args, **kwargs):
        super(TestOKRSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)
        self.cli = app_all_permission.ins()
        self.module_cli = self.cli.okr
    def test_mock_self_func_get_okr_period_list(self):
        """Patch get_okr_period_list with `mock`, check the error, restore it."""
        origin_func = self.module_cli.get_okr_period_list
        self.module_cli.get_okr_period_list = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_okr_period_list(pylark.GetOKRPeriodListReq())
        assert "msg=mock-failed" in f"{e}"
        # Restore the original so later tests see the real method.
        self.module_cli.get_okr_period_list = origin_func
    def test_mock_self_func_batch_get_okr(self):
        """Patch batch_get_okr with `mock`, check the error, restore it."""
        origin_func = self.module_cli.batch_get_okr
        self.module_cli.batch_get_okr = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.batch_get_okr(pylark.BatchGetOKRReq())
        assert "msg=mock-failed" in f"{e}"
        # Restore the original so later tests see the real method.
        self.module_cli.batch_get_okr = origin_func
    def test_mock_self_func_get_user_okr_list(self):
        """Patch get_user_okr_list with `mock`, check the error, restore it."""
        origin_func = self.module_cli.get_user_okr_list
        self.module_cli.get_user_okr_list = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_user_okr_list(pylark.GetUserOKRListReq())
        assert "msg=mock-failed" in f"{e}"
        # Restore the original so later tests see the real method.
        self.module_cli.get_user_okr_list = origin_func
# mock raw request
class TestOKRSampleMockRawRequestFailed(unittest.TestCase):
    """Each OKR call must surface the failure raised by the raw transport."""
    def __init__(self, *args, **kwargs):
        super(TestOKRSampleMockRawRequestFailed, self).__init__(*args, **kwargs)
        self.cli = app_all_permission.ins()
        self.module_cli = self.cli.okr
        # Replace the low-level transport so every request fails.
        self.cli.raw_request = mock_raw_request
    def test_mock_raw_request_get_okr_period_list(self):
        """get_okr_period_list surfaces the transport failure."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_okr_period_list(pylark.GetOKRPeriodListReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg
    def test_mock_raw_request_batch_get_okr(self):
        """batch_get_okr surfaces the transport failure."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.batch_get_okr(pylark.BatchGetOKRReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg
    def test_mock_raw_request_get_user_okr_list(self):
        """get_user_okr_list surfaces the transport failure."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_user_okr_list(
                pylark.GetUserOKRListReq(
                    user_id="x",
                )
            )
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg
# real request
class TestOKRSampleRealRequestFailed(unittest.TestCase):
    """Real requests with a no-permission app must fail with a PyLarkError."""
    def __init__(self, *args, **kwargs):
        super(TestOKRSampleRealRequestFailed, self).__init__(*args, **kwargs)
        # Client built from an app that has no permissions at all.
        self.cli = app_no_permission.ins()
        self.module_cli = self.cli.okr
    def test_real_request_get_okr_period_list(self):
        """get_okr_period_list is rejected for the unpermissioned app."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_okr_period_list(pylark.GetOKRPeriodListReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
    def test_real_request_batch_get_okr(self):
        """batch_get_okr is rejected for the unpermissioned app."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.batch_get_okr(pylark.BatchGetOKRReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
    def test_real_request_get_user_okr_list(self):
        """get_user_okr_list is rejected for the unpermissioned app."""
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_user_okr_list(
                pylark.GetUserOKRListReq(
                    user_id="x",
                )
            )
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
| 35.701299
| 83
| 0.693161
| 740
| 5,498
| 4.825676
| 0.101351
| 0.070008
| 0.091011
| 0.058807
| 0.83114
| 0.825539
| 0.786894
| 0.739569
| 0.66676
| 0.632036
| 0
| 0.001853
| 0.214805
| 5,498
| 153
| 84
| 35.934641
| 0.825342
| 0.019825
| 0
| 0.555556
| 1
| 0
| 0.040126
| 0.017091
| 0
| 0
| 0
| 0
| 0.194444
| 1
| 0.166667
| false
| 0
| 0.046296
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
acce5dd608be450ffa4c64e06a52b7687c3eb08a
| 42
|
py
|
Python
|
xinyu/python/node/tagTreeNote/workflows/__init__.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | 1
|
2020-11-06T20:39:11.000Z
|
2020-11-06T20:39:11.000Z
|
xinyu/python/node/tagTreeNote/workflows/__init__.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | 1
|
2021-08-28T02:29:51.000Z
|
2021-08-28T02:29:51.000Z
|
xinyu/python/node/tagTreeNote/workflows/__init__.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | null | null | null |
# Created by Xinyu Zhu on 2021/8/31, 2:09
| 21
| 41
| 0.690476
| 10
| 42
| 2.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.190476
| 42
| 1
| 42
| 42
| 0.558824
| 0.928571
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c581e87b32f68131524302a474e9b0ea36b86db9
| 63
|
py
|
Python
|
vision/transforms/__init__.py
|
YoNyeoSeok/refinenet-pytorch
|
34dfa49a141630247aef1d5d2424c823ecba46c7
|
[
"BSD-2-Clause"
] | null | null | null |
vision/transforms/__init__.py
|
YoNyeoSeok/refinenet-pytorch
|
34dfa49a141630247aef1d5d2424c823ecba46c7
|
[
"BSD-2-Clause"
] | null | null | null |
vision/transforms/__init__.py
|
YoNyeoSeok/refinenet-pytorch
|
34dfa49a141630247aef1d5d2424c823ecba46c7
|
[
"BSD-2-Clause"
] | null | null | null |
from .transforms import RandomHorizontalFlip, RandomResizedCrop
| 63
| 63
| 0.904762
| 5
| 63
| 11.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.966102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5aa8a5200e353d2b9d5b4e5cdb73e592954c19d
| 188
|
py
|
Python
|
distance_plot.py
|
ks8/conformation
|
f470849d5b7b90dc5a65bab8a536de1d57c1021a
|
[
"MIT"
] | null | null | null |
distance_plot.py
|
ks8/conformation
|
f470849d5b7b90dc5a65bab8a536de1d57c1021a
|
[
"MIT"
] | null | null | null |
distance_plot.py
|
ks8/conformation
|
f470849d5b7b90dc5a65bab8a536de1d57c1021a
|
[
"MIT"
] | null | null | null |
""" Plot distributions of atomic pairwise distances. """
from conformation.distance_plot import distance_plot, Args
if __name__ == '__main__':
distance_plot(Args().parse_args())
| 31.333333
| 59
| 0.739362
| 22
| 188
| 5.772727
| 0.681818
| 0.283465
| 0.251969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 188
| 5
| 60
| 37.6
| 0.79375
| 0.255319
| 0
| 0
| 0
| 0
| 0.062992
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
68050b9c1cb1aeb7df4c536e6d9ab1c7ea86489e
| 25
|
py
|
Python
|
sumbert/__init__.py
|
pratikghanwat7/sumbert
|
d349a82d21544328f5af86d654bbd38d8f0241fe
|
[
"Apache-2.0"
] | 5
|
2020-04-24T08:53:33.000Z
|
2021-02-02T08:45:18.000Z
|
sumbert/__init__.py
|
pratikghanwat7/sumbert
|
d349a82d21544328f5af86d654bbd38d8f0241fe
|
[
"Apache-2.0"
] | 4
|
2020-06-07T07:55:49.000Z
|
2021-03-18T05:48:00.000Z
|
sumbert/__init__.py
|
pratikghanwat7/sumbert
|
d349a82d21544328f5af86d654bbd38d8f0241fe
|
[
"Apache-2.0"
] | 1
|
2020-06-15T16:58:35.000Z
|
2020-06-15T16:58:35.000Z
|
from .summarize import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a851997e2f14c3bdd1e704a0aed0be6c7575f329
| 48
|
py
|
Python
|
Audio/Speech-Emotion-Analyzer/utils/__init__.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 359
|
2019-04-11T04:53:12.000Z
|
2022-03-31T16:32:58.000Z
|
Audio/Speech-Emotion-Analyzer/utils/__init__.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 64
|
2021-05-31T10:34:06.000Z
|
2022-01-17T03:44:58.000Z
|
Audio/Speech-Emotion-Analyzer/utils/__init__.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 129
|
2019-04-15T12:24:15.000Z
|
2022-03-31T16:32:53.000Z
|
from .opts import parse_opt
from .plot import *
| 16
| 27
| 0.770833
| 8
| 48
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 28
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a89d74759860142acb24ea31f339b80f01924df6
| 4,262
|
py
|
Python
|
tests/test_loaders.py
|
siddhantgoel/flask-filealchemy
|
448866ef955d0e3259769c5b0cd663a6f361320b
|
[
"MIT"
] | 16
|
2018-10-16T03:32:39.000Z
|
2020-09-04T02:05:37.000Z
|
tests/test_loaders.py
|
siddhantgoel/flask-filealchemy
|
448866ef955d0e3259769c5b0cd663a6f361320b
|
[
"MIT"
] | 8
|
2019-02-25T10:59:15.000Z
|
2019-03-11T08:36:57.000Z
|
tests/test_loaders.py
|
siddhantgoel/flask-filealchemy
|
448866ef955d0e3259769c5b0cd663a6f361320b
|
[
"MIT"
] | 3
|
2019-11-22T23:46:16.000Z
|
2020-06-05T19:17:23.000Z
|
from pathlib import Path
import pytest
from sqlalchemy import Column, String
from flask_filealchemy.loaders import (
BaseLoader,
loader_for,
MarkdownFrontmatterDirectoryLoader,
YAMLDirectoryLoader,
YAMLFileLoader,
)
def test_base_loader_does_not_validate():
    """BaseLoader is abstract: constructing it must raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        BaseLoader(None, None)
def test_yaml_file_loader(db, tmpdir):
    """A single '_all.yml' file in the table directory selects YAMLFileLoader."""
    authors = tmpdir.mkdir('authors')
    authors.join('_all.yml').write('does-not-matter')
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert isinstance(
        loader_for(Path(tmpdir.strpath), author_table), YAMLFileLoader
    )
def test_no_loader_found(db, tmpdir):
    """A mix of .md and .yml files matches no loader; loader_for returns falsy."""
    authors = tmpdir.mkdir('authors')
    authors.join('invalid.md').write('does-not-matter')
    authors.join('valid.yml').write('does-not-matter')
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert not loader_for(Path(tmpdir.strpath), author_table)
def test_yaml_directory_loader(db, tmpdir):
    """Multiple .yml files in the table directory select YAMLDirectoryLoader."""
    authors = tmpdir.mkdir('authors')
    authors.join('first.yml').write('does-not-matter')
    authors.join('second.yml').write('does-not-matter')
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert isinstance(
        loader_for(Path(tmpdir.strpath), author_table), YAMLDirectoryLoader
    )
def test_yaml_directory_loader_with_extra_extensions(db, tmpdir):
    """Every extension YAMLDirectoryLoader advertises selects that loader."""
    authors = tmpdir.mkdir('authors')
    # Create one file per supported extension, e.g. authors-0.yml, authors-1.yaml.
    for index, extension in enumerate(YAMLDirectoryLoader.extensions):
        authors.join('authors-{}.{}'.format(index, extension)).write(
            'does-not-matter'
        )
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert isinstance(
        loader_for(Path(tmpdir.strpath), author_table), YAMLDirectoryLoader
    )
def test_markdown_frontmatter_loader(db, tmpdir):
    """Multiple .md files select MarkdownFrontmatterDirectoryLoader."""
    authors = tmpdir.mkdir('authors')
    authors.join('first.md').write('does-not-matter')
    authors.join('second.md').write('does-not-matter')
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert isinstance(
        loader_for(Path(tmpdir.strpath), author_table),
        MarkdownFrontmatterDirectoryLoader,
    )
def test_markdown_frontmatter_loader_with_extra_extensions(db, tmpdir):
    """Every extension the Markdown loader advertises selects that loader."""
    authors = tmpdir.mkdir('authors')
    # Create one file per supported extension of the Markdown loader.
    for index, extension in enumerate(
        MarkdownFrontmatterDirectoryLoader.extensions
    ):
        authors.join('authors-{}.{}'.format(index, extension)).write(
            'does-not-matter'
        )
    # Declaring the model registers the 'authors' table on db.metadata.
    class Author(db.Model):
        __tablename__ = 'authors'
        slug = Column(String(255), primary_key=True)
        name = Column(String(255), nullable=False)
    assert len(db.metadata.sorted_tables) == 1
    assert db.metadata.sorted_tables[0].name == 'authors'
    author_table = db.metadata.sorted_tables[0]
    assert isinstance(
        loader_for(Path(tmpdir.strpath), author_table),
        MarkdownFrontmatterDirectoryLoader,
    )
| 27.675325
| 75
| 0.685124
| 503
| 4,262
| 5.610338
| 0.153082
| 0.063785
| 0.102055
| 0.140326
| 0.862155
| 0.832743
| 0.832743
| 0.783133
| 0.767541
| 0.749823
| 0
| 0.015684
| 0.192163
| 4,262
| 153
| 76
| 27.856209
| 0.80395
| 0
| 0
| 0.627451
| 0
| 0
| 0.082121
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.068627
| false
| 0
| 0.039216
| 0
| 0.343137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8c845c7ff0b37f5e9929f67cb3e4895d481d2d9
| 356
|
py
|
Python
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_01_02MasterChefCan_bop_test.py"
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/06_07TunaFishCan"
DATASETS = dict(TRAIN=("ycbv_007_tuna_fish_can_train_pbr",))
| 89
| 156
| 0.907303
| 48
| 356
| 6
| 0.708333
| 0.076389
| 0.180556
| 0.256944
| 0.548611
| 0.548611
| 0.548611
| 0.548611
| 0.548611
| 0.548611
| 0
| 0.089337
| 0.025281
| 356
| 3
| 157
| 118.666667
| 0.740634
| 0
| 0
| 0
| 0
| 0
| 0.839888
| 0.839888
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
768fb6e99a5cda9ce9f0ef4711a7e6f92ee58c51
| 84
|
py
|
Python
|
beproud/django/commons/views/__init__.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 2
|
2016-03-07T01:52:12.000Z
|
2017-08-30T06:14:43.000Z
|
beproud/django/commons/views/__init__.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 18
|
2015-03-08T13:52:18.000Z
|
2022-01-25T02:46:09.000Z
|
beproud/django/commons/views/__init__.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 2
|
2015-02-07T01:33:00.000Z
|
2015-09-08T14:57:44.000Z
|
from __future__ import absolute_import
from .simple import *
from .classes import *
| 21
| 38
| 0.809524
| 11
| 84
| 5.727273
| 0.545455
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 84
| 3
| 39
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76bf34682ae73fb349d93e1070e2115eea0228ba
| 139
|
py
|
Python
|
utils/util.py
|
xuqinghan/flv-extract-audio-and-video
|
e4c0c42119e6ea4478817c04e21ffe341bfc4189
|
[
"MIT"
] | 2
|
2020-11-07T14:20:32.000Z
|
2021-03-12T13:53:58.000Z
|
utils/util.py
|
xuqinghan/flv-extract-audio-and-video
|
e4c0c42119e6ea4478817c04e21ffe341bfc4189
|
[
"MIT"
] | null | null | null |
utils/util.py
|
xuqinghan/flv-extract-audio-and-video
|
e4c0c42119e6ea4478817c04e21ffe341bfc4189
|
[
"MIT"
] | 2
|
2020-11-07T21:28:45.000Z
|
2021-12-20T16:19:40.000Z
|
def bytes_to_int(bytes_string: bytes) -> int:
    '''
    Interpret a byte sequence as a big-endian unsigned integer.

    Thin wrapper around int.from_bytes with byteorder fixed to "big".
    An empty byte sequence yields 0.
    '''
    return int.from_bytes(bytes_string, byteorder="big")
| 27.8
| 56
| 0.661871
| 20
| 139
| 4.3
| 0.6
| 0.255814
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215827
| 139
| 5
| 56
| 27.8
| 0.788991
| 0.18705
| 0
| 0
| 0
| 0
| 0.031915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
4f23c578db85f03bcab7fff6ba0e58b179e7afc9
| 483
|
py
|
Python
|
Mundo-1/ex009.py
|
Gabriel-Leao/Exercicios-de-python
|
71933d24ab938d9cd2f4d64dc784b79cb8e756d2
|
[
"MIT"
] | null | null | null |
Mundo-1/ex009.py
|
Gabriel-Leao/Exercicios-de-python
|
71933d24ab938d9cd2f4d64dc784b79cb8e756d2
|
[
"MIT"
] | null | null | null |
Mundo-1/ex009.py
|
Gabriel-Leao/Exercicios-de-python
|
71933d24ab938d9cd2f4d64dc784b79cb8e756d2
|
[
"MIT"
] | null | null | null |
# Read the number whose multiplication table ("tabuada") will be displayed.
num = int(input('Digite um número para ver sua tabuada: '))
print('\033[1;97m-'*20)
# One row per multiplier; {i:>2} right-aligns 1-10 in two columns so the
# table lines up (for 10 the padded and unpadded forms print identically).
for i in range(1, 11):
    print(f'{num} x {i:>2} = {num * i}')
print('\033[1;97m-\033[m'*20)
| 34.5
| 59
| 0.486542
| 101
| 483
| 2.326733
| 0.267327
| 0.255319
| 0.382979
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124688
| 0.169772
| 483
| 13
| 60
| 37.153846
| 0.461347
| 0
| 0
| 0
| 0
| 0
| 0.674948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.923077
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4f4dedb08d98871e9b9a6fb8e823daa68b174d90
| 23
|
py
|
Python
|
MultidimensionalUnittests.py
|
carlosal1015/CalculusOfVariations
|
2d8ec4cac8a5b207c48e73453947017d7081aea0
|
[
"Apache-2.0"
] | null | null | null |
MultidimensionalUnittests.py
|
carlosal1015/CalculusOfVariations
|
2d8ec4cac8a5b207c48e73453947017d7081aea0
|
[
"Apache-2.0"
] | null | null | null |
MultidimensionalUnittests.py
|
carlosal1015/CalculusOfVariations
|
2d8ec4cac8a5b207c48e73453947017d7081aea0
|
[
"Apache-2.0"
] | 1
|
2020-07-15T04:33:28.000Z
|
2020-07-15T04:33:28.000Z
|
# TODO: write a unittest
| 23
| 23
| 0.826087
| 3
| 23
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0.913043
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f51e552b5de70f83bf97e4f98c8f3e51bbb81f9
| 95
|
py
|
Python
|
test/test_cli.py
|
FabianElsmer/rueckenwind
|
255b026009edcdc41b6a5ad7cbae3e5e4970696c
|
[
"Apache-2.0"
] | 3
|
2015-09-03T07:39:57.000Z
|
2020-01-28T09:14:04.000Z
|
test/test_cli.py
|
FabianElsmer/rueckenwind
|
255b026009edcdc41b6a5ad7cbae3e5e4970696c
|
[
"Apache-2.0"
] | 6
|
2015-05-09T13:26:12.000Z
|
2017-07-13T14:22:31.000Z
|
test/test_cli.py
|
FabianElsmer/rueckenwind
|
255b026009edcdc41b6a5ad7cbae3e5e4970696c
|
[
"Apache-2.0"
] | 5
|
2015-05-13T08:58:22.000Z
|
2020-09-10T14:49:43.000Z
|
import os
import sys
import tempfile
import imp
import shutil
import rw.testing
import rw.cli
| 10.555556
| 17
| 0.821053
| 16
| 95
| 4.875
| 0.5625
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 95
| 8
| 18
| 11.875
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f684fa3e5e11686f13e230fb927e0bcd96b8769
| 293
|
py
|
Python
|
src/package/03/use.py
|
privong/still-magic
|
1d651840497d66d44ff43528f6e1f38e698ce168
|
[
"CC-BY-4.0"
] | 190
|
2020-09-04T20:33:53.000Z
|
2022-02-12T10:09:52.000Z
|
src/package/03/use.py
|
privong/still-magic
|
1d651840497d66d44ff43528f6e1f38e698ce168
|
[
"CC-BY-4.0"
] | 134
|
2020-09-03T16:30:00.000Z
|
2021-11-10T01:05:05.000Z
|
src/package/03/use.py
|
privong/still-magic
|
1d651840497d66d44ff43528f6e1f38e698ce168
|
[
"CC-BY-4.0"
] | 41
|
2020-09-03T22:35:44.000Z
|
2022-03-26T01:14:59.000Z
|
from zipf import make_zipf, is_zipf
# Build a 5-element Zipf distribution and show it.
generated = make_zipf(5)
print('generated distribution: {}'.format(generated))
# Perturb the last value so the data deviates from a pure Zipf distribution.
generated[-1] *= 2
# Report whether the perturbed data passes with the default relative tolerance.
print('passes test with default tolerance: {}'.format(is_zipf(generated)))
# Report the same check with a much looser relative tolerance of 1.0.
print('passes test with tolerance of 1.0: {}'.format(is_zipf(generated, rel=1.0)))
| 36.625
| 82
| 0.740614
| 44
| 293
| 4.818182
| 0.454545
| 0.084906
| 0.212264
| 0.179245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026515
| 0.098976
| 293
| 7
| 83
| 41.857143
| 0.776515
| 0
| 0
| 0
| 1
| 0
| 0.34471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.166667
| 0
| 0.166667
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
4f77d864e2fc8bafe3a11788df1ee500e78a3be9
| 13,989
|
py
|
Python
|
lola/corrections.py
|
011235813/lola
|
d7c43b82a425b424795c9ca3ee6f69973cef2f33
|
[
"MIT"
] | null | null | null |
lola/corrections.py
|
011235813/lola
|
d7c43b82a425b424795c9ca3ee6f69973cef2f33
|
[
"MIT"
] | null | null | null |
lola/corrections.py
|
011235813/lola
|
d7c43b82a425b424795c9ca3ee6f69973cef2f33
|
[
"MIT"
] | null | null | null |
"""
The magic corrections of LOLA.
"""
import tensorflow as tf
from .utils import flatgrad
def corrections_func(mainPN, batch_size, trace_length,
                     corrections=False, cube=None):
    """Build the LOLA correction graph for a pair of policy networks.

    Attaches flat gradients to the networks in `mainPN` as attributes:
    `grad` (own policy gradient), `grad_v_1` / `grad_v_0` (cross
    gradients), and `delta` (the update direction, with or without the
    second-order LOLA correction).

    Args:
    -----
    mainPN: list of policy/Q-networks
    batch_size: int
    trace_length: int
    corrections: bool (default: False)
        Whether policy networks should use corrections.
    cube: tf.Variable or None (default: None)
        If provided, should be constructed via `lola.utils.make_cube`.
        Used for variance reduction of the value estimation.
        When provided, the computation graph for corrections is faster to
        compile but is quite memory inefficient.
        When None, the variance reduction graph is constructed dynamically,
        is a little longer to compile, but has a lower memory footprint.
    """
    if cube is not None:
        # Dense path: form the outer product of the two agents' per-step
        # log-probabilities and contract it against the precomputed `cube`.
        ac_logp0 = tf.reshape(mainPN[0].log_pi_action_bs_t,
                              [batch_size, 1, trace_length])
        ac_logp1 = tf.reshape(mainPN[1].log_pi_action_bs_t,
                              [batch_size, trace_length, 1])
        mat_1 = tf.reshape(tf.squeeze(tf.matmul(ac_logp1, ac_logp0)),
                           [batch_size, 1, trace_length * trace_length])
        v_0 = tf.matmul(tf.reshape(mainPN[0].sample_reward, [batch_size, trace_length, 1]), mat_1)
        v_0 = tf.reshape(v_0, [batch_size, trace_length, trace_length, trace_length])
        v_1 = tf.matmul(tf.reshape(mainPN[1].sample_reward, [batch_size, trace_length, 1]), mat_1)
        v_1 = tf.reshape(v_1, [batch_size, trace_length, trace_length, trace_length])
        v_0 = 2 * tf.reduce_sum(v_0 * cube) / batch_size
        v_1 = 2 * tf.reduce_sum(v_1 * cube) / batch_size
    else:
        # Memory-lean path: accumulate the same quantity one step at a time.
        ac_logp0 = tf.reshape(mainPN[0].log_pi_action_bs_t,
                              [batch_size, trace_length])
        ac_logp1 = tf.reshape(mainPN[1].log_pi_action_bs_t,
                              [batch_size, trace_length])
        # Static exclusive cumsum: entry i sums log-probs of steps < i.
        ac_logp0_cumsum = [tf.constant(0.)]
        ac_logp1_cumsum = [tf.constant(0.)]
        for i in range(trace_length - 1):
            ac_logp0_cumsum.append(tf.add(ac_logp0_cumsum[-1], ac_logp0[:, i]))
            ac_logp1_cumsum.append(tf.add(ac_logp1_cumsum[-1], ac_logp1[:, i]))
        # Compute v_0 and v_1 by accumulating reward-weighted products.
        mat_cumsum = ac_logp0[:, 0] * ac_logp1[:, 0]
        v_0 = mat_cumsum * mainPN[0].sample_reward[:, 0]
        v_1 = mat_cumsum * mainPN[1].sample_reward[:, 0]
        for i in range(1, trace_length):
            mat_cumsum = tf.add(mat_cumsum, ac_logp0[:, i] * ac_logp1[:, i])
            mat_cumsum = tf.add(mat_cumsum, ac_logp0_cumsum[i] * ac_logp1[:, i])
            mat_cumsum = tf.add(mat_cumsum, ac_logp1_cumsum[i] * ac_logp0[:, i])
            v_0 = tf.add(v_0, mat_cumsum * mainPN[0].sample_reward[:, i])
            v_1 = tf.add(v_1, mat_cumsum * mainPN[1].sample_reward[:, i])
        v_0 = 2 * tf.reduce_sum(v_0) / batch_size
        v_1 = 2 * tf.reduce_sum(v_1) / batch_size
    # Advantage-weighted surrogates; the value baseline is stop-gradient'ed
    # so gradients flow only through the action log-probabilities.
    v_0_pi_0 = 2*tf.reduce_sum(((mainPN[0].target-tf.stop_gradient(mainPN[0].value)) * mainPN[0].gamma_array) * mainPN[0].log_pi_action_bs_t) / batch_size
    v_0_pi_1 = 2*tf.reduce_sum(((mainPN[0].target-tf.stop_gradient(mainPN[0].value)) * mainPN[1].gamma_array) * mainPN[1].log_pi_action_bs_t) / batch_size
    v_1_pi_0 = 2*tf.reduce_sum(((mainPN[1].target-tf.stop_gradient(mainPN[1].value)) * mainPN[0].gamma_array) * mainPN[0].log_pi_action_bs_t) / batch_size
    v_1_pi_1 = 2*tf.reduce_sum(((mainPN[1].target-tf.stop_gradient(mainPN[1].value)) * mainPN[1].gamma_array) * mainPN[1].log_pi_action_bs_t) / batch_size
    # Flattened first-order gradients of each value surrogate w.r.t. each agent.
    v_0_grad_theta_0 = flatgrad(v_0_pi_0, mainPN[0].parameters)
    v_0_grad_theta_1 = flatgrad(v_0_pi_1, mainPN[1].parameters)
    v_1_grad_theta_0 = flatgrad(v_1_pi_0, mainPN[0].parameters)
    v_1_grad_theta_1 = flatgrad(v_1_pi_1, mainPN[1].parameters)
    mainPN[0].grad = v_0_grad_theta_0
    mainPN[1].grad = v_1_grad_theta_1
    mainPN[0].grad_v_1 = v_1_grad_theta_0
    mainPN[1].grad_v_0 = v_0_grad_theta_1
    if corrections:
        # Second-order LOLA terms: differentiate the inner product of the
        # (stop-gradient'ed) cross gradient with the opponent's gradient of
        # the sampled value estimate.
        v_0_grad_theta_0_wrong = flatgrad(v_0, mainPN[0].parameters)
        v_1_grad_theta_1_wrong = flatgrad(v_1, mainPN[1].parameters)
        # NOTE(review): assumes both networks have the same flattened
        # parameter length -- confirm for asymmetric architectures.
        param_len = v_0_grad_theta_0_wrong.get_shape()[0].value
        multiply0 = tf.matmul(
            tf.reshape(tf.stop_gradient(v_0_grad_theta_1), [1, param_len]),
            tf.reshape(v_1_grad_theta_1_wrong, [param_len, 1])
        )
        multiply1 = tf.matmul(
            tf.reshape(tf.stop_gradient(v_1_grad_theta_0), [1, param_len]),
            tf.reshape(v_0_grad_theta_0_wrong, [param_len, 1])
        )
        second_order0 = flatgrad(multiply0, mainPN[0].parameters)
        second_order1 = flatgrad(multiply1, mainPN[1].parameters)
        mainPN[0].v_0_grad_01 = second_order0
        mainPN[1].v_1_grad_10 = second_order1
        mainPN[0].delta = v_0_grad_theta_0 + second_order0
        mainPN[1].delta = v_1_grad_theta_1 + second_order1
    else:
        # Plain policy gradient: no correction term.
        mainPN[0].delta = v_0_grad_theta_0
        mainPN[1].delta = v_1_grad_theta_1
def corrections_func_lola_pg(mainPN, batch_size, trace_length,
                             cube=None):
    """Build the asymmetric LOLA-vs-PG correction graph.

    Agent 0 is LOLA (gets the second-order correction), Agent 1 is a plain
    policy-gradient learner (no correction). Attaches `grad`, `grad_v_1` /
    `grad_v_0`, and `delta` to the networks in `mainPN`.

    Args:
    -----
    mainPN: list of policy/Q-networks
    batch_size: int
    trace_length: int
    cube: tf.Variable or None (default: None)
        If provided, should be constructed via `lola.utils.make_cube`.
        Used for variance reduction of the value estimation.
        When provided, the computation graph for corrections is faster to
        compile but is quite memory inefficient.
        When None, the variance reduction graph is constructed dynamically,
        is a little longer to compile, but has a lower memory footprint.
    """
    if cube is not None:
        # Dense path: outer product of per-step log-probs contracted
        # against the precomputed `cube`.
        ac_logp0 = tf.reshape(mainPN[0].log_pi_action_bs_t,
                              [batch_size, 1, trace_length])
        ac_logp1 = tf.reshape(mainPN[1].log_pi_action_bs_t,
                              [batch_size, trace_length, 1])
        mat_1 = tf.reshape(tf.squeeze(tf.matmul(ac_logp1, ac_logp0)),
                           [batch_size, 1, trace_length * trace_length])
        v_0 = tf.matmul(tf.reshape(mainPN[0].sample_reward, [batch_size, trace_length, 1]), mat_1)
        v_0 = tf.reshape(v_0, [batch_size, trace_length, trace_length, trace_length])
        v_1 = tf.matmul(tf.reshape(mainPN[1].sample_reward, [batch_size, trace_length, 1]), mat_1)
        v_1 = tf.reshape(v_1, [batch_size, trace_length, trace_length, trace_length])
        v_0 = 2 * tf.reduce_sum(v_0 * cube) / batch_size
        v_1 = 2 * tf.reduce_sum(v_1 * cube) / batch_size
    else:
        # Memory-lean path: accumulate the same quantity step by step.
        ac_logp0 = tf.reshape(mainPN[0].log_pi_action_bs_t,
                              [batch_size, trace_length])
        ac_logp1 = tf.reshape(mainPN[1].log_pi_action_bs_t,
                              [batch_size, trace_length])
        # Static exclusive cumsum: entry i sums log-probs of steps < i.
        ac_logp0_cumsum = [tf.constant(0.)]
        ac_logp1_cumsum = [tf.constant(0.)]
        for i in range(trace_length - 1):
            ac_logp0_cumsum.append(tf.add(ac_logp0_cumsum[-1], ac_logp0[:, i]))
            ac_logp1_cumsum.append(tf.add(ac_logp1_cumsum[-1], ac_logp1[:, i]))
        # Compute v_0 and v_1 by accumulating reward-weighted products.
        mat_cumsum = ac_logp0[:, 0] * ac_logp1[:, 0]
        v_0 = mat_cumsum * mainPN[0].sample_reward[:, 0]
        v_1 = mat_cumsum * mainPN[1].sample_reward[:, 0]
        for i in range(1, trace_length):
            mat_cumsum = tf.add(mat_cumsum, ac_logp0[:, i] * ac_logp1[:, i])
            mat_cumsum = tf.add(mat_cumsum, ac_logp0_cumsum[i] * ac_logp1[:, i])
            mat_cumsum = tf.add(mat_cumsum, ac_logp1_cumsum[i] * ac_logp0[:, i])
            v_0 = tf.add(v_0, mat_cumsum * mainPN[0].sample_reward[:, i])
            v_1 = tf.add(v_1, mat_cumsum * mainPN[1].sample_reward[:, i])
        v_0 = 2 * tf.reduce_sum(v_0) / batch_size
        v_1 = 2 * tf.reduce_sum(v_1) / batch_size
    # Advantage-weighted surrogates; baselines are stop-gradient'ed so
    # gradients flow only through the action log-probabilities.
    v_0_pi_0 = 2*tf.reduce_sum(((mainPN[0].target-tf.stop_gradient(mainPN[0].value)) * mainPN[0].gamma_array) * mainPN[0].log_pi_action_bs_t) / batch_size
    v_0_pi_1 = 2*tf.reduce_sum(((mainPN[0].target-tf.stop_gradient(mainPN[0].value)) * mainPN[1].gamma_array) * mainPN[1].log_pi_action_bs_t) / batch_size
    v_1_pi_0 = 2*tf.reduce_sum(((mainPN[1].target-tf.stop_gradient(mainPN[1].value)) * mainPN[0].gamma_array) * mainPN[0].log_pi_action_bs_t) / batch_size
    v_1_pi_1 = 2*tf.reduce_sum(((mainPN[1].target-tf.stop_gradient(mainPN[1].value)) * mainPN[1].gamma_array) * mainPN[1].log_pi_action_bs_t) / batch_size
    # Flattened first-order gradients of each value surrogate.
    v_0_grad_theta_0 = flatgrad(v_0_pi_0, mainPN[0].parameters)
    v_0_grad_theta_1 = flatgrad(v_0_pi_1, mainPN[1].parameters)
    v_1_grad_theta_0 = flatgrad(v_1_pi_0, mainPN[0].parameters)
    v_1_grad_theta_1 = flatgrad(v_1_pi_1, mainPN[1].parameters)
    mainPN[0].grad = v_0_grad_theta_0
    mainPN[1].grad = v_1_grad_theta_1
    mainPN[0].grad_v_1 = v_1_grad_theta_0
    mainPN[1].grad_v_0 = v_0_grad_theta_1
    # Corrections enabled for V0 (agent 0, the LOLA learner).
    # NOTE(review): v_0_grad_theta_0_wrong is computed but unused here;
    # kept from the symmetric two-sided version of this function.
    v_0_grad_theta_0_wrong = flatgrad(v_0, mainPN[0].parameters)
    v_1_grad_theta_1_wrong = flatgrad(v_1, mainPN[1].parameters)
    # NOTE(review): param_len comes from agent 1's gradient; assumes both
    # networks have the same flattened parameter length.
    param_len = v_1_grad_theta_1_wrong.get_shape()[0].value
    multiply0 = tf.matmul(
        tf.reshape(tf.stop_gradient(v_0_grad_theta_1), [1, param_len]),
        tf.reshape(v_1_grad_theta_1_wrong, [param_len, 1])
    )
    second_order0 = flatgrad(multiply0, mainPN[0].parameters)
    mainPN[0].v_0_grad_01 = second_order0
    mainPN[0].delta = v_0_grad_theta_0 + second_order0
    # Correction disabled for V1 (agent 1 is plain policy gradient).
    mainPN[1].delta = v_1_grad_theta_1
def corrections_func_3player(mainPN, batch_size, trace_length):
    """Build the LOLA correction graph for three agents.

    Corresponds to the case of corrections=True, cube=None in
    the original `corrections_func`, iterated over agent pairs.

    Args:
    -----
    mainPN: list of policy/Q-networks
    batch_size: int
    trace_length: int
    """
    n_agents = len(mainPN)
    # ------ else case of original cube condition ---------- #
    # NOTE(review): despite computing n_agents above, the reshapes below
    # hard-code exactly three agents.
    ac_logp0 = tf.reshape(mainPN[0].log_pi_action_bs_t,
                          [batch_size, trace_length])
    ac_logp1 = tf.reshape(mainPN[1].log_pi_action_bs_t,
                          [batch_size, trace_length])
    ac_logp2 = tf.reshape(mainPN[2].log_pi_action_bs_t,
                          [batch_size, trace_length])
    ac_logp = [ac_logp0, ac_logp1, ac_logp2]
    # Static exclusive cumsum per agent: entry i sums log-probs of steps < i.
    ac_logp0_cumsum = [tf.constant(0.)]
    ac_logp1_cumsum = [tf.constant(0.)]
    ac_logp2_cumsum = [tf.constant(0.)]
    for i in range(trace_length - 1):
        ac_logp0_cumsum.append(tf.add(ac_logp0_cumsum[-1], ac_logp0[:, i]))
        ac_logp1_cumsum.append(tf.add(ac_logp1_cumsum[-1], ac_logp1[:, i]))
        ac_logp2_cumsum.append(tf.add(ac_logp2_cumsum[-1], ac_logp2[:, i]))
    ac_logp_cumsum = [ac_logp0_cumsum, ac_logp1_cumsum, ac_logp2_cumsum]
    # First-order policy gradients; each agent's delta starts from its own
    # advantage-weighted gradient (baseline stop-gradient'ed).
    v_i_pi_i = [None] * n_agents
    v_i_grad_theta_i = [None] * n_agents
    for i in range(n_agents):
        v_i_pi_i[i] = 2*tf.reduce_sum(((mainPN[i].target-tf.stop_gradient(mainPN[i].value)) * mainPN[i].gamma_array) * mainPN[i].log_pi_action_bs_t) / batch_size
        v_i_grad_theta_i[i] = flatgrad(v_i_pi_i[i], mainPN[i].parameters)
        mainPN[i].delta = v_i_grad_theta_i[i]
    # Second-order cross terms accumulated onto delta over agent pairs.
    # NOTE(review): the inner range starts at `ai`, so the self-pair
    # (ai == aj) also contributes a correction term; the 2-player
    # `corrections_func` only used the cross pair -- confirm intended.
    for ai in range(n_agents):
        for aj in range(ai, n_agents):
            # Compute v_ij / v_ji: reward-weighted accumulated products of
            # this pair's log-probabilities (same scheme as corrections_func).
            mat_cumsum = ac_logp[ai][:, 0] * ac_logp[aj][:, 0]
            v_ij = mat_cumsum * mainPN[ai].sample_reward[:, 0]
            v_ji = mat_cumsum * mainPN[aj].sample_reward[:, 0]
            for i in range(1, trace_length):
                mat_cumsum = tf.add(mat_cumsum, ac_logp[ai][:, i] * ac_logp[aj][:, i])
                mat_cumsum = tf.add(mat_cumsum, ac_logp_cumsum[ai][i] * ac_logp[aj][:, i])
                mat_cumsum = tf.add(mat_cumsum, ac_logp_cumsum[aj][i] * ac_logp[ai][:, i])
                v_ij = tf.add(v_ij, mat_cumsum * mainPN[ai].sample_reward[:, i])
                v_ji = tf.add(v_ji, mat_cumsum * mainPN[aj].sample_reward[:, i])
            v_ij = 2 * tf.reduce_sum(v_ij) / batch_size
            v_ji = 2 * tf.reduce_sum(v_ji) / batch_size
            # Cross policy-gradient surrogates for the pair.
            v_i_pi_j = 2*tf.reduce_sum(((mainPN[ai].target-tf.stop_gradient(mainPN[ai].value)) * mainPN[aj].gamma_array) * mainPN[aj].log_pi_action_bs_t) / batch_size
            v_j_pi_i = 2*tf.reduce_sum(((mainPN[aj].target-tf.stop_gradient(mainPN[aj].value)) * mainPN[ai].gamma_array) * mainPN[ai].log_pi_action_bs_t) / batch_size
            v_i_grad_theta_j = flatgrad(v_i_pi_j, mainPN[aj].parameters)
            v_j_grad_theta_i = flatgrad(v_j_pi_i, mainPN[ai].parameters)
            # NOTE(review): these assignments are repeated on every pair
            # iteration with the same values.
            mainPN[ai].grad = v_i_grad_theta_i[ai]
            mainPN[aj].grad = v_i_grad_theta_i[aj]
            v_i_grad_theta_i_wrong = flatgrad(v_ij, mainPN[ai].parameters)
            v_j_grad_theta_j_wrong = flatgrad(v_ji, mainPN[aj].parameters)
            # NOTE(review): assumes all agents share the same flattened
            # parameter length.
            param_len = v_i_grad_theta_i_wrong.get_shape()[0].value
            multiplyi = tf.matmul(
                tf.reshape(tf.stop_gradient(v_i_grad_theta_j), [1, param_len]),
                tf.reshape(v_j_grad_theta_j_wrong, [param_len, 1])
            )
            multiplyj = tf.matmul(
                tf.reshape(tf.stop_gradient(v_j_grad_theta_i), [1, param_len]),
                tf.reshape(v_i_grad_theta_i_wrong, [param_len, 1])
            )
            second_orderi = flatgrad(multiplyi, mainPN[ai].parameters)
            second_orderj = flatgrad(multiplyj, mainPN[aj].parameters)
            mainPN[ai].delta = mainPN[ai].delta + second_orderi
            mainPN[aj].delta = mainPN[aj].delta + second_orderj
| 46.168317
| 166
| 0.639931
| 2,214
| 13,989
| 3.685185
| 0.066847
| 0.013237
| 0.029661
| 0.035053
| 0.847898
| 0.798872
| 0.768477
| 0.757446
| 0.722147
| 0.722147
| 0
| 0.039264
| 0.238973
| 13,989
| 302
| 167
| 46.321192
| 0.727128
| 0.132318
| 0
| 0.652406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016043
| false
| 0
| 0.010695
| 0
| 0.026738
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f78e80f46b6e83c08b5d6e4497ad93dd1b196ad
| 5,555
|
py
|
Python
|
src/com/facebook/buck/apple/project_generator/build_with_buck_test.py
|
illicitonion/buck
|
0336e37a5d9da94b6dcdf6ab78711c1788616ad0
|
[
"Apache-2.0"
] | 1
|
2022-01-25T13:13:09.000Z
|
2022-01-25T13:13:09.000Z
|
src/com/facebook/buck/apple/project_generator/build_with_buck_test.py
|
illicitonion/buck
|
0336e37a5d9da94b6dcdf6ab78711c1788616ad0
|
[
"Apache-2.0"
] | null | null | null |
src/com/facebook/buck/apple/project_generator/build_with_buck_test.py
|
illicitonion/buck
|
0336e37a5d9da94b6dcdf6ab78711c1788616ad0
|
[
"Apache-2.0"
] | 1
|
2022-01-25T13:14:45.000Z
|
2022-01-25T13:14:45.000Z
|
import unittest
import tempfile
import uuid
import os
import platform
import pkg_resources
from build_with_buck import *
XCODE_DWARF = "dwarf"
XCODE_DSYM = "dwarf-with-dsym"
class TestBuildWithBuck(unittest.TestCase):
    """Tests for ``get_command`` (imported from ``build_with_buck``).

    Every test returns early on non-Darwin platforms, because the script
    under test is only expected to be used on OS X.
    """

    def run_with_data(self,
                      platform_name,
                      archs,
                      valid_archs,
                      debug_format,
                      repo_root,
                      buck_path,
                      flags,
                      target,
                      dwarf_flavor,
                      dsym_flavor):
        """Populate the Xcode-style env vars and build the buck command.

        NOTE(review): mutates ``os.environ`` for the whole process and
        never restores the previous values; fine when tests run serially.
        """
        os.environ['PLATFORM_NAME'] = platform_name
        os.environ['ARCHS'] = archs
        os.environ['VALID_ARCHS'] = valid_archs
        os.environ['DEBUG_INFORMATION_FORMAT'] = debug_format
        return get_command(repo_root, buck_path, flags, target, dwarf_flavor, dsym_flavor)

    def test_generating_single_arch_dsym(self):
        """dwarf-with-dsym selects the dSYM flavor for a single arch."""
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("some_plat",
                                    "some_arch",
                                    "some_arch other_arch",
                                    XCODE_DSYM,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DSYM_FLAVOR,some_plat-some_arch')

    # Bug fix: this test was previously defined twice with identical bodies;
    # the second definition silently shadowed the first. One copy is kept.
    def test_generating_single_arch_dwarf(self):
        """plain dwarf selects the DWARF flavor for a single arch."""
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("some_plat",
                                    "some_arch",
                                    "some_arch other_arch",
                                    XCODE_DWARF,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DWARF_FLAVOR,some_plat-some_arch')

    def test_generating_double_arch(self):
        """Multiple ARCHS yield one platform-arch flavor per architecture."""
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("plat",
                                    "arch1 arch2",
                                    "arch2 arch1",
                                    XCODE_DWARF,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DWARF_FLAVOR,plat-arch1,plat-arch2')

    def test_generating_unsupported_arch(self):
        """An arch not in VALID_ARCHS raises ValueError."""
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        # Unused `as context` binding dropped; only the raise matters.
        with self.assertRaises(ValueError):
            self.run_with_data("some_plat",
                               "----UNSUPPORTED_ARCH----",
                               "some_arch other_arch",
                               XCODE_DWARF,
                               "/repo/path",
                               "/buck/path",
                               "--flags",
                               "//My:Target",
                               "DWARF_FLAVOR",
                               "DSYM_FLAVOR")

    def test_generating_unsupported_debug_format(self):
        """An unknown DEBUG_INFORMATION_FORMAT raises ValueError."""
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        with self.assertRaises(ValueError):
            self.run_with_data("some_plat",
                               "some_arch",
                               "some_arch other_arch",
                               "------UNSUPPORTED-----",
                               "/repo/path",
                               "/buck/path",
                               "--flags",
                               "//My:Target",
                               "DWARF_FLAVOR",
                               "DSYM_FLAVOR")
if __name__ == '__main__':
unittest.main()
| 41.766917
| 99
| 0.406121
| 451
| 5,555
| 4.747228
| 0.155211
| 0.044839
| 0.087342
| 0.075666
| 0.754787
| 0.750117
| 0.750117
| 0.750117
| 0.734236
| 0.734236
| 0
| 0.002201
| 0.509271
| 5,555
| 132
| 100
| 42.083333
| 0.783199
| 0.051665
| 0
| 0.655172
| 0
| 0.008621
| 0.192549
| 0.046949
| 0
| 0
| 0
| 0
| 0.051724
| 1
| 0.060345
| false
| 0
| 0.060345
| 0
| 0.189655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f9a811f067af6d965e17093491e04565e0db9a2
| 156
|
py
|
Python
|
qdtrack/core/track/__init__.py
|
OceanPang/qdtrack
|
b905d2a599a87242d9cf3d01b1833eff155bf688
|
[
"Apache-2.0"
] | 241
|
2020-11-28T03:28:03.000Z
|
2022-03-31T13:27:01.000Z
|
qdtrack/core/track/__init__.py
|
msg4rajesh/qdtrack
|
b28af06c7fdb6ce99b967302c0c7e9a557d508bf
|
[
"Apache-2.0"
] | 61
|
2020-12-11T20:04:18.000Z
|
2022-03-05T13:49:05.000Z
|
qdtrack/core/track/__init__.py
|
msg4rajesh/qdtrack
|
b28af06c7fdb6ce99b967302c0c7e9a557d508bf
|
[
"Apache-2.0"
] | 37
|
2020-12-26T08:41:54.000Z
|
2022-03-29T21:52:44.000Z
|
from .similarity import cal_similarity
from .transforms import track2result, restore_result
__all__ = ['cal_similarity', 'track2result', 'restore_result']
| 31.2
| 62
| 0.814103
| 17
| 156
| 7
| 0.529412
| 0.218487
| 0.420168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.096154
| 156
| 4
| 63
| 39
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96e121114d79c00e32394d2cd285042d6a4a35f1
| 3,355
|
py
|
Python
|
src/dsalgo/sparse_table_test.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | 1
|
2022-02-10T02:13:07.000Z
|
2022-02-10T02:13:07.000Z
|
src/dsalgo/sparse_table_test.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | 6
|
2022-01-05T09:15:54.000Z
|
2022-01-09T05:48:43.000Z
|
src/dsalgo/sparse_table_test.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | null | null | null |
import operator
import unittest
import dsalgo.abstract_structure
import dsalgo.sparse_table
class TestSparseTable(unittest.TestCase):
    """Exercise dsalgo.sparse_table.sparse_table with the min semigroup."""

    def test_min(self) -> None:
        data = [3, 1, 2, 10, -1]
        sg = dsalgo.abstract_structure.Semigroup[int](min)
        query = dsalgo.sparse_table.sparse_table(sg, data)
        # Range-min queries over half-open intervals [lo, hi).
        for lo, hi, want in ((0, 5, -1), (0, 1, 3), (0, 3, 1)):
            self.assertEqual(query(lo, hi), want)
class TestDisjointSparseTable(unittest.TestCase):
    """Exercise dsalgo.sparse_table.disjoint_sparse_table on several semigroups."""

    def test_min(self) -> None:
        data = [3, 1, 2, 10, -1]
        sg = dsalgo.abstract_structure.Semigroup[int](min)
        query = dsalgo.sparse_table.disjoint_sparse_table(sg, data)
        for lo, hi, want in ((0, 5, -1), (0, 1, 3), (0, 3, 1)):
            self.assertEqual(query(lo, hi), want)

    def test_sum(self) -> None:
        data = [3, 1, 2, 10, -1]
        sg = dsalgo.abstract_structure.Semigroup[int](operator.add)
        query = dsalgo.sparse_table.disjoint_sparse_table(sg, data)
        for lo, hi, want in ((0, 5, 15), (0, 1, 3), (0, 3, 6)):
            self.assertEqual(query(lo, hi), want)

    def test_xor(self) -> None:
        data = [3, 1, 2, 10, 0]
        sg = dsalgo.abstract_structure.Semigroup[int](operator.xor)
        query = dsalgo.sparse_table.disjoint_sparse_table(sg, data)
        for lo, hi, want in ((0, 5, 10), (0, 1, 3), (0, 3, 0)):
            self.assertEqual(query(lo, hi), want)
class TestDisjointSparseTableIntXor(unittest.TestCase):
    """Exercise the specialized int-xor disjoint sparse table."""

    def test(self) -> None:
        query = dsalgo.sparse_table.disjoint_sparse_table_int_xor([3, 1, 2, 10, 0])
        for lo, hi, want in ((0, 5, 10), (0, 1, 3), (0, 3, 0)):
            self.assertEqual(query(lo, hi), want)
class TestDisjointSparseTableIntSum(unittest.TestCase):
    """Exercise the specialized int-sum disjoint sparse table."""

    def test(self) -> None:
        query = dsalgo.sparse_table.disjoint_sparse_table_int_sum([3, 1, 2, 10, -1])
        for lo, hi, want in ((0, 5, 15), (0, 1, 3), (0, 3, 6)):
            self.assertEqual(query(lo, hi), want)
class TestSparseTable2D(unittest.TestCase):
    """Exercise the 2-D sparse table with the min semigroup."""

    def test(self) -> None:
        grid = [
            [0, 1, 2, 3],
            [4, 5, 6, 7],
            [-1, 4, 0, 1],
        ]
        sg = dsalgo.abstract_structure.Semigroup[int](min)
        query = dsalgo.sparse_table.sparse_table_2d(sg, grid)
        # (row_lo, col_lo, row_hi, col_hi) -> expected min of the sub-grid.
        cases = (
            (0, 0, 3, 4, -1),
            (0, 1, 3, 4, 0),
            (1, 3, 2, 4, 7),
            (1, 1, 2, 3, 5),
            (0, 2, 2, 3, 2),
        )
        for r0, c0, r1, c1, want in cases:
            self.assertEqual(query(r0, c0, r1, c1), want)
class TestSparseTable2DFixedShape(unittest.TestCase):
    """Exercise the fixed-window 2-D sparse table (2x2 windows, min)."""

    def test(self) -> None:
        grid = [
            [0, 1, 2, 3],
            [4, 5, 6, 7],
            [-1, 4, 0, 1],
        ]
        sg = dsalgo.abstract_structure.Semigroup[int](min)
        query = dsalgo.sparse_table.sparse_table_2d_fixed_window(
            sg,
            grid,
            (2, 2),
        )
        # Each query gives the min of the 2x2 window anchored at (row, col).
        for row, col, want in ((0, 0, 0), (1, 0, -1), (0, 2, 2)):
            self.assertEqual(query(row, col), want)
        # A window anchored past the right edge is out of range.
        with self.assertRaises(IndexError):
            query(0, 3)
if __name__ == "__main__":
unittest.main()
| 33.55
| 74
| 0.604769
| 468
| 3,355
| 4.155983
| 0.104701
| 0.200514
| 0.240617
| 0.151157
| 0.810283
| 0.798972
| 0.762468
| 0.687404
| 0.616452
| 0.616452
| 0
| 0.064231
| 0.257526
| 3,355
| 99
| 75
| 33.888889
| 0.71658
| 0
| 0
| 0.512195
| 0
| 0
| 0.002385
| 0
| 0
| 0
| 0
| 0
| 0.329268
| 1
| 0.097561
| false
| 0
| 0.04878
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96e22e20ed7911ac8d02ebdcb6885da529de1e69
| 1,545
|
py
|
Python
|
gab_toolbox/mail_tools.py
|
gbene/gab_toolbox
|
314d6e8f5abdaca8ae35ae68c614c96e7b77d49f
|
[
"MIT"
] | null | null | null |
gab_toolbox/mail_tools.py
|
gbene/gab_toolbox
|
314d6e8f5abdaca8ae35ae68c614c96e7b77d49f
|
[
"MIT"
] | null | null | null |
gab_toolbox/mail_tools.py
|
gbene/gab_toolbox
|
314d6e8f5abdaca8ae35ae68c614c96e7b77d49f
|
[
"MIT"
] | null | null | null |
import os
import smtplib
from email.message import EmailMessage as em
def success_mail(to_mail,info,from_mail=os.environ.get('python_sender'),from_pass=os.environ.get('python_sender_pass')):
    """Send a success-notification e-mail through Gmail's SSL SMTP endpoint.

    NOTE(review): the default credentials are read from the environment when
    this module is imported (default-argument evaluation), not per call.
    """
    message = em()
    message['From'] = from_mail
    message['To'] = to_mail
    message['Subject'] = 'Python automatic script notification: Success!'
    message.set_content(f'The script finished succesfully, here are some informations:\n\n + Script name: {os.path.basename(__file__)}\n + {info}')
    with smtplib.SMTP_SSL('smtp.gmail.com',465) as smtp:
        smtp.login(from_mail, from_pass)
        smtp.send_message(message)
    print('Notification mail send')
def error_mail(to_mail,info,from_mail=os.environ.get('python_sender'),from_pass=os.environ.get('python_sender_pass')):
    """Send an error-notification e-mail through Gmail's SSL SMTP endpoint.

    NOTE(review): the default credentials are read from the environment when
    this module is imported (default-argument evaluation), not per call.
    """
    message = em()
    message['From'] = from_mail
    message['To'] = to_mail
    message['Subject'] = 'Python automatics script notification: Error!'
    message.set_content(f'An error occured, here are some informations:\n\n + Script name: {os.path.basename(__file__)}\n + {info}')
    with smtplib.SMTP_SSL('smtp.gmail.com',465) as smtp:
        smtp.login(from_mail, from_pass)
        smtp.send_message(message)
def half_way(to_mail,text,from_mail=os.environ.get('python_sender'),from_pass=os.environ.get('python_sender_pass')):
    """Send an in-progress notification e-mail through Gmail's SSL SMTP endpoint.

    Args:
        to_mail: recipient address.
        text: status text appended to the message body.
        from_mail / from_pass: sender credentials; default to the
            ``python_sender`` / ``python_sender_pass`` environment variables,
            read once at import time (default-argument evaluation).
    """
    msg = em()
    msg['From'] = from_mail
    msg['To'] = to_mail
    msg['Subject'] = 'Python automatic script notification: In progress'
    # Bug fix: `text` was accepted but never used; include it in the body so
    # callers' status messages actually reach the recipient.
    msg.set_content(f'The script {os.path.basename(__file__)} is still running.\n\n{text}')
    with smtplib.SMTP_SSL('smtp.gmail.com',465) as smtp:
        smtp.login(from_mail, from_pass)
        smtp.send_message(msg)
| 32.87234
| 140
| 0.737864
| 246
| 1,545
| 4.414634
| 0.243902
| 0.066298
| 0.066298
| 0.099448
| 0.768877
| 0.768877
| 0.726519
| 0.726519
| 0.726519
| 0.726519
| 0
| 0.00656
| 0.111974
| 1,545
| 46
| 141
| 33.586957
| 0.784985
| 0
| 0
| 0.580645
| 0
| 0.064516
| 0.398964
| 0.056995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0.193548
| 0.096774
| 0
| 0.193548
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8c3c2b66dfa33505c807b7c5b63dee1b2b3673e8
| 1,297
|
py
|
Python
|
cowsay/lib/cows/dragon_and_cow.py
|
Ovlic/cowsay_py
|
1ee8d11d6d895d7695d57e26003d71ce18379d3b
|
[
"MIT"
] | null | null | null |
cowsay/lib/cows/dragon_and_cow.py
|
Ovlic/cowsay_py
|
1ee8d11d6d895d7695d57e26003d71ce18379d3b
|
[
"MIT"
] | null | null | null |
cowsay/lib/cows/dragon_and_cow.py
|
Ovlic/cowsay_py
|
1ee8d11d6d895d7695d57e26003d71ce18379d3b
|
[
"MIT"
] | null | null | null |
def Dragon_and_cow(thoughts, eyes, eye, tongue):
    # Render the "dragon and cow" cowsay template, substituting the thought
    # marker, eyes, and tongue into the ASCII art.
    # NOTE(review): `eye` is accepted but never used -- presumably kept for
    # interface parity with the other cow templates; confirm against callers.
    # The art's whitespace is significant; do not reformat the literals.
    return f"""
{thoughts} ^ /^
{thoughts} / \\ // \\
{thoughts} |\\___/| / \\// .\\
{thoughts}"""+""" /O O \\__ / // | \\ \\ *----*
/ / \\/_/ // | \\ \\ \\ |
\@___\@\` \\/_ // | \\ \\ \\/\\ \\
0/0/| \\/_ // | \\ \\ \\ \\
0/0/0/0/| \\/// | \\ \\ | |
0/0/0/0/0/_|_ / ( // | \\ _\\ | /
0/0/0/0/0/0/\`/,_ _ _/ ) ; -. | _ _\\.-~ / /
,-} _ *-.|.-~-. .~ ~
\\ \\__/ \`/\\ / ~-. _ .-~ /
\\____("""+f"{eyes}"+""") *. } { /
( (--) .----~-.\\ \\-\` .~
//__\\\\"""+f"{tongue}"+"""\\__ Ack! ///.----..< \\ _ -~
// \\\\ ///-._ _ _ _ _ _ _{^ - - - - ~
"""
| 68.263158
| 91
| 0.115652
| 38
| 1,297
| 2.868421
| 0.342105
| 0.293578
| 0.412844
| 0.513761
| 0.155963
| 0.155963
| 0.155963
| 0.155963
| 0.155963
| 0.155963
| 0
| 0.036957
| 0.645335
| 1,297
| 19
| 92
| 68.263158
| 0.2
| 0
| 0
| 0
| 0
| 0.105263
| 0.92681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0.052632
| 0.105263
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ff6fcec3936420f871f842b78caf175cc95b446
| 84
|
py
|
Python
|
tdlda/__init__.py
|
jsosulski/tdlda
|
d3acc59d34e47a4f36773b3df86f0842089f65cd
|
[
"MIT"
] | null | null | null |
tdlda/__init__.py
|
jsosulski/tdlda
|
d3acc59d34e47a4f36773b3df86f0842089f65cd
|
[
"MIT"
] | null | null | null |
tdlda/__init__.py
|
jsosulski/tdlda
|
d3acc59d34e47a4f36773b3df86f0842089f65cd
|
[
"MIT"
] | null | null | null |
from .classification import TimeDecoupledLda
from .classification import Vectorizer
| 28
| 44
| 0.880952
| 8
| 84
| 9.25
| 0.625
| 0.486486
| 0.648649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 84
| 2
| 45
| 42
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4ffdcb3d960f6c0889595ba30cef62d17ba1f75c
| 8,762
|
py
|
Python
|
tests/test_formats/test_seq/test_birdsongrec.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_formats/test_seq/test_birdsongrec.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | 3
|
2018-12-16T17:57:22.000Z
|
2018-12-16T20:12:33.000Z
|
tests/test_formats/test_seq/test_birdsongrec.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | null | null | null |
"""test functions in birdsongrec module"""
import numpy as np
import pytest
import soundfile
import crowsetta
from .asserts import assert_rounded_correct_num_decimals
@pytest.mark.parametrize(
    'concat_seqs_into_songs',
    [
        True,
        False,
    ]
)
def test_from_file(birdsong_rec_xml_file,
                   birdsong_rec_wav_path,
                   concat_seqs_into_songs):
    """from_file returns a BirdsongRec; one sequence per .wav when concatenating."""
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=birdsong_rec_wav_path,
        concat_seqs_into_songs=concat_seqs_into_songs,
    )
    assert isinstance(birdsongrec, crowsetta.formats.seq.BirdsongRec)
    if concat_seqs_into_songs:
        expected_n = len(sorted(birdsong_rec_wav_path.glob('*.wav')))
        assert len(birdsongrec.sequences) == expected_n
# Shared parametrization for test_to_seq and test_to_annot:
# (concat_seqs_into_songs, samplerate, wav_path). None wav_path means "use
# the fixture path"; the last two rows point at a non-existent wav_path to
# exercise the UserWarning branch.
ARGNAMES = 'concat_seqs_into_songs, samplerate, wav_path'
ARGVALUES = [
    (True, None, None),
    (False, None, None),
    (True, 32000, None),
    (False, 32000, None),
    (True, None, 'birdsongrec/Bird0/Wave'),
    (False, None, 'birdsongrec/Bird0/Wave'),
    (True, 32000, 'birdsongrec/Bird0/Wave'),
    (False, 32000, 'birdsongrec/Bird0/Wave'),
    (True, None, 'birdsongrec/doesnt/exist'),
    (False, None, 'birdsongrec/doesnt/exist'),
]
@pytest.mark.parametrize(
    ARGNAMES, ARGVALUES
)
def test_to_seq(test_data_root,
                birdsong_rec_xml_file,
                birdsong_rec_wav_path,
                concat_seqs_into_songs,
                samplerate,
                wav_path):
    """to_seq returns a list of crowsetta.Sequence, warning when wavs are missing."""
    wav_path = birdsong_rec_wav_path if wav_path is None else test_data_root / wav_path
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=wav_path,
        concat_seqs_into_songs=concat_seqs_into_songs,
    )
    if wav_path.exists():
        seqs = birdsongrec.to_seq(samplerate)
    else:
        # Missing audio should still produce sequences, but warn the user.
        with pytest.warns(UserWarning):
            seqs = birdsongrec.to_seq(samplerate)
    assert isinstance(seqs, list)
    assert all(isinstance(seq, crowsetta.Sequence) for seq in seqs)
    if concat_seqs_into_songs:
        assert len(seqs) == len(sorted(birdsong_rec_wav_path.glob('*.wav')))
@pytest.mark.parametrize(
    'decimals',
    [
        1,
        2,
        3,
        4,
        5,
    ]
)
def test_to_seq_round_times_true(test_data_root,
                                 birdsong_rec_xml_file,
                                 birdsong_rec_wav_path,
                                 decimals):
    """With round_times=True, all onset/offset times are rounded to `decimals`."""
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=birdsong_rec_wav_path,
        concat_seqs_into_songs=True,
    )
    seqs = birdsongrec.to_seq(round_times=True, decimals=decimals)
    all_onsets = [t for seq in seqs for t in seq.onsets_s]
    all_offsets = [t for seq in seqs for t in seq.offsets_s]
    assert_rounded_correct_num_decimals(all_onsets, decimals)
    assert_rounded_correct_num_decimals(all_offsets, decimals)
def test_to_seq_round_times_false(test_data_root,
                                  birdsong_rec_xml_file,
                                  birdsong_rec_wav_path):
    """With round_times=False, times equal those computed from the raw annotations."""
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=birdsong_rec_wav_path,
        concat_seqs_into_songs=True,
    )
    seqs = birdsongrec.to_seq(round_times=False)
    onsets_from_to_seq = [t for seq in seqs for t in seq.onsets_s]
    offsets_from_to_seq = [t for seq in seqs for t in seq.offsets_s]
    # Recompute the expected times straight from the annotations so we can
    # compare with what ``to_seq`` returns.
    expected_onsets = []
    expected_offsets = []
    for annot_seq in birdsongrec.sequences:
        onset_samples = np.array([syl.position for syl in annot_seq.syls])
        offset_samples = np.array([syl.position + syl.length for syl in annot_seq.syls])
        wav_filename = birdsongrec.wav_path / annot_seq.wav_file
        sr = soundfile.info(wav_filename).samplerate
        expected_onsets.extend((onset_samples / sr).tolist())
        expected_offsets.extend((offset_samples / sr).tolist())
    assert np.all(np.allclose(onsets_from_to_seq, expected_onsets))
    assert np.all(np.allclose(offsets_from_to_seq, expected_offsets))
@pytest.mark.parametrize(
    ARGNAMES, ARGVALUES
)
def test_to_annot(test_data_root,
                  birdsong_rec_xml_file,
                  birdsong_rec_wav_path,
                  concat_seqs_into_songs,
                  samplerate,
                  wav_path):
    """to_annot returns a list of crowsetta.Annotation, warning when wavs are missing."""
    wav_path = birdsong_rec_wav_path if wav_path is None else test_data_root / wav_path
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=wav_path,
        concat_seqs_into_songs=concat_seqs_into_songs,
    )
    if wav_path.exists():
        annots = birdsongrec.to_annot(samplerate)
    else:
        # Missing audio should still produce annotations, but warn the user.
        with pytest.warns(UserWarning):
            annots = birdsongrec.to_annot(samplerate)
    assert isinstance(annots, list)
    assert all(isinstance(annot, crowsetta.Annotation) for annot in annots)
    if concat_seqs_into_songs:
        assert len(annots) == len(sorted(birdsong_rec_wav_path.glob('*.wav')))
@pytest.mark.parametrize(
    'decimals',
    [
        1,
        2,
        3,
        4,
        5,
    ]
)
def test_to_annot_round_times_true(test_data_root,
                                   birdsong_rec_xml_file,
                                   birdsong_rec_wav_path,
                                   decimals):
    """With round_times=True, annotation times are rounded to `decimals`."""
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(
        annot_path=birdsong_rec_xml_file,
        wav_path=birdsong_rec_wav_path,
        concat_seqs_into_songs=True,
    )
    annots = birdsongrec.to_annot(round_times=True, decimals=decimals)
    all_onsets = [t for annot in annots for t in annot.seq.onsets_s]
    all_offsets = [t for annot in annots for t in annot.seq.offsets_s]
    assert_rounded_correct_num_decimals(all_onsets, decimals)
    assert_rounded_correct_num_decimals(all_offsets, decimals)
def test_to_annot_round_times_false(test_data_root,
                                    birdsong_rec_xml_file,
                                    birdsong_rec_wav_path):
    """With ``round_times=False``, times from ``to_annot`` equal the raw
    syllable sample positions divided by each .wav file's sample rate."""
    birdsongrec = crowsetta.formats.seq.BirdsongRec.from_file(annot_path=birdsong_rec_xml_file,
                                                              wav_path=birdsong_rec_wav_path,
                                                              concat_seqs_into_songs=True)
    annots = birdsongrec.to_annot(round_times=False)
    onsets_s_from_to_annot = [onset_s for annot in annots for onset_s in annot.seq.onsets_s]
    offsets_s_from_to_annot = [offset_s for annot in annots for offset_s in annot.seq.offsets_s]
    # get directly from annotations so we can compare with what ``to_seq`` returns
    onsets_s_from_birdsongrec = []
    offsets_s_from_birdsongrec = []
    for birdsongrec_seq in birdsongrec.sequences:
        # Convert each syllable's sample positions to seconds using the
        # sample rate read from the corresponding .wav file.
        onset_samples = np.array([syl.position for syl in birdsongrec_seq.syls])
        offset_samples = np.array([syl.position + syl.length for syl in birdsongrec_seq.syls])
        wav_filename = birdsongrec.wav_path / birdsongrec_seq.wav_file
        samplerate_this_wav = soundfile.info(wav_filename).samplerate
        onsets_s_from_birdsongrec.extend(
            (onset_samples / samplerate_this_wav).tolist()
        )
        offsets_s_from_birdsongrec.extend(
            (offset_samples / samplerate_this_wav).tolist()
        )
    assert np.all(
        np.allclose(onsets_s_from_to_annot, onsets_s_from_birdsongrec)
    )
    assert np.all(
        np.allclose(offsets_s_from_to_annot, offsets_s_from_birdsongrec)
    )
| 39.468468
| 108
| 0.617895
| 1,042
| 8,762
| 4.810941
| 0.105566
| 0.057251
| 0.050269
| 0.068223
| 0.859765
| 0.808897
| 0.805705
| 0.788949
| 0.770197
| 0.770197
| 0
| 0.005643
| 0.312372
| 8,762
| 221
| 109
| 39.647059
| 0.82639
| 0.027163
| 0
| 0.612565
| 0
| 0
| 0.02736
| 0.021254
| 0
| 0
| 0
| 0
| 0.089005
| 1
| 0.036649
| false
| 0
| 0.026178
| 0
| 0.062827
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b2452df8a176f9fcd110149e8790e06c24dae2a
| 2,626
|
py
|
Python
|
intervention/test/test_controller.py
|
tomcur/intervention
|
f8c647819fe6abe0f3972e669d2f7d155f275d55
|
[
"MIT"
] | 4
|
2021-01-12T04:42:03.000Z
|
2022-01-07T07:42:30.000Z
|
intervention/test/test_controller.py
|
Beskhue/intervention
|
f8c647819fe6abe0f3972e669d2f7d155f275d55
|
[
"MIT"
] | null | null | null |
intervention/test/test_controller.py
|
Beskhue/intervention
|
f8c647819fe6abe0f3972e669d2f7d155f275d55
|
[
"MIT"
] | 1
|
2022-01-06T06:01:42.000Z
|
2022-01-06T06:01:42.000Z
|
import math
import unittest
import numpy as np
from .. import controller
class Test(unittest.TestCase):
    """Unit tests for the controller's turning-radius and waypoint helpers."""

    def _assert_cases(self, fn, cases):
        # Shared driver: run ``fn(waypoints, distance)`` for every case and
        # compare the returned point against the expected target to 5 places.
        for waypoints, distance, (target_x, target_y) in cases:
            x, y = fn(np.array(waypoints), distance)
            self.assertAlmostEqual(target_x, x, places=5)
            self.assertAlmostEqual(target_y, y, places=5)

    def test_turning_radius(self):
        """Radius is symmetric in steering sign; zero steering gives infinity."""
        cases = [
            ((1.0, 0.0), 0.5),
            ((-1.0, 0.0), 0.5),
            ((1.0, 1.0), 1.0),
            ((-1.0, 1.0), 1.0),
            ((0, 1.0), math.inf),
        ]
        for args, expected in cases:
            self.assertEqual(controller._turning_radius_to(*args), expected)

    def test_waypoint_interpolation(self):
        """Interpolating n meters ahead along waypoints lands on the expected point."""
        self._assert_cases(
            controller._interpolate_waypoint_n_meters_ahead,
            [
                ([[0.5, 0]], 1.0, [1.0, 0]),
                ([[-0.5, 0]], 1.0, [-1.0, 0]),
                ([[0.5, 0], [1.5, 0]], 1.0, [1.0, 0]),
                ([[-0.5, 0], [10.0, 0]], 1.0, [0.0, 0]),
                ([[0.5, 0], [0.5, 1.0]], 1.0, [0.5, 0.5]),
                ([[0.3, -0.3]], 20.0, [math.sqrt(20 ** 2 / 2), -math.sqrt(20 ** 2 / 2)]),
            ],
        )

    def test_waypoint_lookahead(self):
        """Looking ahead n meters along the trajectory lands on the expected point."""
        self._assert_cases(
            controller._lookahead_trajectory_n_meters_ahead,
            [
                ([[0.5, 0]], 1.0, [1.0, 0]),
                ([[-0.5, 0]], 1.0, [-1.0, 0]),
                ([[0.5, 0], [1.5, 0]], 1.0, [1.0, 0]),
                ([[-0.5, 0], [10.0, 0]], 1.0, [1.0, 0]),
                ([[0.5, 0], [0.5, 0.5]], 1.0, [0.5, math.sqrt(1 ** 2 - 0.5 ** 2)]),
                ([[0.5, 0], [0.5, 0.3], [-1.5, -0.1]], 1.0, [-1.0, 0.0]),
                ([[-0.2, 0.5], [-0.2, 1.0]], 1.0, [-0.2, math.sqrt(1 ** 2 - 0.2 ** 2)]),
                ([[0.2, 0.5], [0.2, 1.0]], 1.0, [0.2, math.sqrt(1 ** 2 - 0.2 ** 2)]),
                ([[0.2, -0.5], [0.2, -1.0]], 1.0, [0.2, -math.sqrt(1 ** 2 - 0.2 ** 2)]),
                (
                    [[-20, 20], [20, -20], [500, 500]],
                    10.0,
                    [-math.sqrt(10 ** 2 / 2), math.sqrt(10 ** 2 / 2)],
                ),
                (
                    [[-5, 5], [20, -20], [500, 500]],
                    10.0,
                    [math.sqrt(10 ** 2 / 2), -math.sqrt(10 ** 2 / 2)],
                ),
            ],
        )
| 40.4
| 85
| 0.460396
| 402
| 2,626
| 2.900498
| 0.124378
| 0.063465
| 0.064322
| 0.054889
| 0.801029
| 0.765009
| 0.753002
| 0.745283
| 0.745283
| 0.740137
| 0
| 0.150616
| 0.319878
| 2,626
| 64
| 86
| 41.03125
| 0.50224
| 0
| 0
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.053571
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b3c69a4d7d528e0d59ab69f2356b7b23642ccc9
| 12,295
|
py
|
Python
|
python-shell/src/test/test_connector.py
|
sw96411/gaffer-tools
|
2dd4ff64cf6afa1dd3f9529977d7170370b11f58
|
[
"Apache-2.0"
] | null | null | null |
python-shell/src/test/test_connector.py
|
sw96411/gaffer-tools
|
2dd4ff64cf6afa1dd3f9529977d7170370b11f58
|
[
"Apache-2.0"
] | null | null | null |
python-shell/src/test/test_connector.py
|
sw96411/gaffer-tools
|
2dd4ff64cf6afa1dd3f9529977d7170370b11f58
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import json
from gafferpy import gaffer as g
from gafferpy import gaffer_connector
class GafferConnectorTest(unittest.TestCase):
    def test_execute_operation(self):
        """Round-trip a GetElements operation and check the single expected
        edge comes back.

        NOTE(review): integration test — assumes a Gaffer REST server is
        running on localhost:8080 with the road-traffic sample data loaded.
        """
        gc = gaffer_connector.GafferConnector('http://localhost:8080/rest/latest')
        elements = gc.execute_operation(
            g.GetElements(
                input=[
                    g.EntitySeed('M5:10')
                ],
                view=g.View(
                    edges=[
                        g.ElementDefinition(
                            group='JunctionLocatedAt'
                        )
                    ]
                )
            )
        )
        self.assertEqual(
            [g.Edge("JunctionLocatedAt", "M5:10", "390466,225615", True, {},
                    "SOURCE")],
            elements)
    def test_is_operation_supported(self):
        """Query support details for GetAllElements and compare the parsed
        JSON descriptor against the expected document.

        NOTE(review): integration test — requires a Gaffer REST server on
        localhost:8080; the expected document is tied to that server's
        deployed operation set.
        """
        gc = gaffer_connector.GafferConnector('http://localhost:8080/rest/latest')
        response_text = gc.is_operation_supported(
            g.IsOperationSupported(
                operation='uk.gov.gchq.gaffer.operation.impl.get.GetAllElements'
            )
        )
        # Both sides go through json.loads below, so whitespace in this
        # literal is irrelevant; only the parsed structure is compared.
        expected_response_text = '''
{
"name": "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"summary": "Gets all elements compatible with a provided View",
"fields": [
{
"name": "view",
"className": "uk.gov.gchq.gaffer.data.elementdefinition.view.View",
"required": false
},
{
"name": "options",
"className": "java.util.Map<java.lang.String,java.lang.String>",
"required": false
},
{
"name": "directedType",
"summary": "Is the Edge directed?",
"className": "java.lang.String",
"options": [
"DIRECTED",
"UNDIRECTED",
"EITHER"
],
"required": false
},
{
"name": "views",
"className": "java.util.List<uk.gov.gchq.gaffer.data.elementdefinition.view.View>",
"required": false
}
],
"next": [
"uk.gov.gchq.gaffer.operation.impl.add.AddElements",
"uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet",
"uk.gov.gchq.gaffer.operation.impl.output.ToArray",
"uk.gov.gchq.gaffer.operation.impl.output.ToEntitySeeds",
"uk.gov.gchq.gaffer.operation.impl.output.ToList",
"uk.gov.gchq.gaffer.operation.impl.output.ToMap",
"uk.gov.gchq.gaffer.operation.impl.output.ToCsv",
"uk.gov.gchq.gaffer.operation.impl.output.ToSet",
"uk.gov.gchq.gaffer.operation.impl.output.ToStream",
"uk.gov.gchq.gaffer.operation.impl.output.ToVertices",
"uk.gov.gchq.gaffer.named.operation.NamedOperation",
"uk.gov.gchq.gaffer.operation.impl.compare.Max",
"uk.gov.gchq.gaffer.operation.impl.compare.Min",
"uk.gov.gchq.gaffer.operation.impl.compare.Sort",
"uk.gov.gchq.gaffer.operation.impl.GetWalks",
"uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements",
"uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects",
"uk.gov.gchq.gaffer.operation.impl.Validate",
"uk.gov.gchq.gaffer.operation.impl.Count",
"uk.gov.gchq.gaffer.operation.impl.CountGroups",
"uk.gov.gchq.gaffer.operation.impl.Limit",
"uk.gov.gchq.gaffer.operation.impl.DiscardOutput",
"uk.gov.gchq.gaffer.operation.impl.Map",
"uk.gov.gchq.gaffer.operation.impl.If",
"uk.gov.gchq.gaffer.operation.impl.While",
"uk.gov.gchq.gaffer.operation.impl.ForEach",
"uk.gov.gchq.gaffer.operation.impl.output.ToSingletonList",
"uk.gov.gchq.gaffer.operation.impl.Reduce",
"uk.gov.gchq.gaffer.operation.impl.join.Join",
"uk.gov.gchq.gaffer.operation.impl.SetVariable",
"uk.gov.gchq.gaffer.operation.impl.function.Filter",
"uk.gov.gchq.gaffer.operation.impl.function.Transform",
"uk.gov.gchq.gaffer.operation.impl.function.Aggregate",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsBetweenSets",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsWithinSet",
"uk.gov.gchq.gaffer.operation.impl.SplitStoreFromIterable",
"uk.gov.gchq.gaffer.operation.impl.SampleElementsForSplitPoints",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.SummariseGroupOverRanges",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsInRanges",
"uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherAuthorisedGraph",
"uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache"
],
"exampleJson": {
"class": "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
},
"outputClassName": "uk.gov.gchq.gaffer.commonutil.iterable.CloseableIterable<uk.gov.gchq.gaffer.data.element.Element>"
}
'''
        self.assertEqual(
            json.loads(expected_response_text),
            json.loads(response_text)
        )
    def test_execute_get(self):
        """GET the list of supported operations and compare the parsed JSON
        against the expected class names.

        NOTE(review): integration test — requires a Gaffer REST server on
        localhost:8080; the expected list is tied to that server's version.
        """
        # Show the full diff if the (long) operation lists differ.
        self.maxDiff = None
        gc = gaffer_connector.GafferConnector('http://localhost:8080/rest/latest')
        response_text = gc.execute_get(
            g.GetOperations()
        )
        # Compared via json.loads, so whitespace in this literal is irrelevant.
        expected_response_text = '''
[
"uk.gov.gchq.gaffer.operation.impl.add.AddElements",
"uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet",
"uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport",
"uk.gov.gchq.gaffer.operation.impl.export.GetExports",
"uk.gov.gchq.gaffer.operation.impl.job.GetJobDetails",
"uk.gov.gchq.gaffer.operation.impl.job.GetAllJobDetails",
"uk.gov.gchq.gaffer.operation.impl.job.GetJobResults",
"uk.gov.gchq.gaffer.operation.impl.output.ToArray",
"uk.gov.gchq.gaffer.operation.impl.output.ToEntitySeeds",
"uk.gov.gchq.gaffer.operation.impl.output.ToList",
"uk.gov.gchq.gaffer.operation.impl.output.ToMap",
"uk.gov.gchq.gaffer.operation.impl.output.ToCsv",
"uk.gov.gchq.gaffer.operation.impl.output.ToSet",
"uk.gov.gchq.gaffer.operation.impl.output.ToStream",
"uk.gov.gchq.gaffer.operation.impl.output.ToVertices",
"uk.gov.gchq.gaffer.named.operation.NamedOperation",
"uk.gov.gchq.gaffer.named.operation.AddNamedOperation",
"uk.gov.gchq.gaffer.named.operation.GetAllNamedOperations",
"uk.gov.gchq.gaffer.named.operation.DeleteNamedOperation",
"uk.gov.gchq.gaffer.named.view.AddNamedView",
"uk.gov.gchq.gaffer.named.view.GetAllNamedViews",
"uk.gov.gchq.gaffer.named.view.DeleteNamedView",
"uk.gov.gchq.gaffer.operation.impl.compare.Max",
"uk.gov.gchq.gaffer.operation.impl.compare.Min",
"uk.gov.gchq.gaffer.operation.impl.compare.Sort",
"uk.gov.gchq.gaffer.operation.OperationChain",
"uk.gov.gchq.gaffer.operation.OperationChainDAO",
"uk.gov.gchq.gaffer.operation.impl.ValidateOperationChain",
"uk.gov.gchq.gaffer.operation.impl.GetWalks",
"uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements",
"uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects",
"uk.gov.gchq.gaffer.operation.impl.Validate",
"uk.gov.gchq.gaffer.operation.impl.Count",
"uk.gov.gchq.gaffer.operation.impl.CountGroups",
"uk.gov.gchq.gaffer.operation.impl.Limit",
"uk.gov.gchq.gaffer.operation.impl.DiscardOutput",
"uk.gov.gchq.gaffer.store.operation.GetSchema",
"uk.gov.gchq.gaffer.operation.impl.Map",
"uk.gov.gchq.gaffer.operation.impl.If",
"uk.gov.gchq.gaffer.operation.impl.While",
"uk.gov.gchq.gaffer.operation.impl.ForEach",
"uk.gov.gchq.gaffer.operation.impl.output.ToSingletonList",
"uk.gov.gchq.gaffer.operation.impl.Reduce",
"uk.gov.gchq.gaffer.operation.impl.join.Join",
"uk.gov.gchq.gaffer.operation.impl.job.CancelScheduledJob",
"uk.gov.gchq.gaffer.operation.impl.SetVariable",
"uk.gov.gchq.gaffer.operation.impl.GetVariable",
"uk.gov.gchq.gaffer.operation.impl.GetVariables",
"uk.gov.gchq.gaffer.operation.impl.function.Filter",
"uk.gov.gchq.gaffer.operation.impl.function.Transform",
"uk.gov.gchq.gaffer.operation.impl.function.Aggregate",
"uk.gov.gchq.gaffer.store.operation.GetTraits",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsBetweenSets",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsWithinSet",
"uk.gov.gchq.gaffer.operation.impl.SplitStoreFromFile",
"uk.gov.gchq.gaffer.operation.impl.SplitStoreFromIterable",
"uk.gov.gchq.gaffer.operation.impl.SampleElementsForSplitPoints",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.SummariseGroupOverRanges",
"uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsInRanges",
"uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherAuthorisedGraph",
"uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache",
"uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport"
]
'''
        self.assertEqual(
            json.loads(expected_response_text),
            json.loads(response_text)
        )
    def test_dummy_header(self):
        """Test that the addition of a dummy header does not effect the standard test"""
        # Same request as test_execute_operation, but with an extra custom
        # header passed to the connector; the result must be unchanged.
        gc = gaffer_connector.GafferConnector('http://localhost:8080/rest/latest', headers={"dummy_Header": "value"})
        elements = gc.execute_operation(
            g.GetElements(
                input=[
                    g.EntitySeed('M5:10')
                ],
                view=g.View(
                    edges=[
                        g.ElementDefinition(
                            group='JunctionLocatedAt'
                        )
                    ]
                )
            )
        )
        self.assertEqual(
            [g.Edge("JunctionLocatedAt", "M5:10", "390466,225615", True, {},
                    "SOURCE")],
            elements)
def test_class_initilisation(self):
"""Test that the gaffer_connector class is correctly initialised with instance attributes"""
host = 'http://localhost:8080/rest/latest',
verbose = False,
headers = {"dummy_Header": "value"}
gc = gaffer_connector.GafferConnector(host, verbose, headers)
actuals = [gc._host, gc._verbose, gc._headers]
expecteds = [host, verbose, headers]
for actual, expected in zip(actuals, expecteds):
self.assertEqual(actual, expected)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 45.876866
| 128
| 0.617243
| 1,310
| 12,295
| 5.756489
| 0.184733
| 0.077576
| 0.139637
| 0.232728
| 0.74884
| 0.739557
| 0.709322
| 0.692083
| 0.664766
| 0.656942
| 0
| 0.007395
| 0.252054
| 12,295
| 267
| 129
| 46.048689
| 0.812636
| 0.05856
| 0
| 0.571429
| 0
| 0.012987
| 0.779134
| 0.551515
| 0
| 0
| 0
| 0
| 0.021645
| 1
| 0.021645
| false
| 0
| 0.017316
| 0
| 0.04329
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b4ab62e5bb5cb6bd067463d3dc19f3f9473ccde
| 114
|
py
|
Python
|
src/optimizer.py
|
jayantik/AiCorExample
|
5edbc7343b4f1bccd9ab8bddaa5ac785b2d27782
|
[
"MIT"
] | 4
|
2021-02-12T16:30:53.000Z
|
2021-08-30T02:48:19.000Z
|
src/optimizer.py
|
jayantik/AiCorExample
|
5edbc7343b4f1bccd9ab8bddaa5ac785b2d27782
|
[
"MIT"
] | null | null | null |
src/optimizer.py
|
jayantik/AiCorExample
|
5edbc7343b4f1bccd9ab8bddaa5ac785b2d27782
|
[
"MIT"
] | 2
|
2021-01-17T16:13:03.000Z
|
2021-01-18T11:09:10.000Z
|
import torch
def get(args, parameters):
    """Build the optimizer named by ``args.optimizer`` over ``parameters``.

    ``args.optimizer`` is looked up as an attribute of ``torch.optim``
    (e.g. ``"SGD"``, ``"Adam"``); the resulting class is instantiated with
    the given parameters and learning rate ``args.lr``.
    """
    optimizer_cls = getattr(torch.optim, args.optimizer)
    return optimizer_cls(parameters, lr=args.lr)
| 19
| 71
| 0.745614
| 16
| 114
| 5.3125
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 114
| 5
| 72
| 22.8
| 0.858586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8c7b6a75e7d0218ff4da44d0f59081f89639bff2
| 160
|
py
|
Python
|
scrapers/__init__.py
|
arifer612/MDLPackage
|
7f5b3d66fe4dd1eaf0ee7b2f054707af428109a9
|
[
"MIT"
] | 1
|
2021-06-15T08:52:01.000Z
|
2021-06-15T08:52:01.000Z
|
scrapers/__init__.py
|
arifer612/MDLPackage
|
7f5b3d66fe4dd1eaf0ee7b2f054707af428109a9
|
[
"MIT"
] | 1
|
2022-01-31T06:33:30.000Z
|
2022-02-03T09:58:54.000Z
|
scrapers/__init__.py
|
arifer612/MDLPackage
|
7f5b3d66fe4dd1eaf0ee7b2f054707af428109a9
|
[
"MIT"
] | 1
|
2021-08-12T22:35:09.000Z
|
2021-08-12T22:35:09.000Z
|
from general import configFile
import general
from .main import YouTube
from .Library import database
from .Library import tvOsaka
from .Library import tvTokyo
| 22.857143
| 30
| 0.8375
| 22
| 160
| 6.090909
| 0.454545
| 0.246269
| 0.380597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 160
| 6
| 31
| 26.666667
| 0.971014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8c8db1500449dd70205b5d27e172cbb9da52c189
| 575
|
py
|
Python
|
oslo/__init__.py
|
sooftware/oslo
|
f51d3fd95b3a0341c9d1a7de1df22b3e5a6afd7d
|
[
"Apache-2.0"
] | null | null | null |
oslo/__init__.py
|
sooftware/oslo
|
f51d3fd95b3a0341c9d1a7de1df22b3e5a6afd7d
|
[
"Apache-2.0"
] | null | null | null |
oslo/__init__.py
|
sooftware/oslo
|
f51d3fd95b3a0341c9d1a7de1df22b3e5a6afd7d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 TUNiB Inc.
from oslo.data.datasets.dataset_causal_lm import *
from oslo.data.preprocess.preprocessor import *
from oslo.data.utils.blenders import *
from oslo.data.utils.loaders import *
from oslo.data.utils.samplers import *
from oslo.modeling_utils import *
from oslo.models.gpt2.configuration_gpt2 import *
from oslo.models.gpt2.modeling_gpt2 import *
from oslo.models.gpt_neo.configuration_gpt_neo import *
from oslo.models.gpt_neo.modeling_gpt_neo import *
from oslo.models.gptj.configuration_gptj import *
from oslo.models.gptj.modeling_gptj import *
| 38.333333
| 55
| 0.822609
| 87
| 575
| 5.287356
| 0.287356
| 0.208696
| 0.334783
| 0.26087
| 0.515217
| 0.182609
| 0
| 0
| 0
| 0
| 0
| 0.015355
| 0.093913
| 575
| 14
| 56
| 41.071429
| 0.867562
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cf91bef04c1bb2a2ef697bee3b605e45117ba14
| 21
|
py
|
Python
|
pyanom/__init__.py
|
thunderbug1/pyanom
|
e442bff70a4d1880a9a698c020287edf1933d498
|
[
"MIT"
] | null | null | null |
pyanom/__init__.py
|
thunderbug1/pyanom
|
e442bff70a4d1880a9a698c020287edf1933d498
|
[
"MIT"
] | null | null | null |
pyanom/__init__.py
|
thunderbug1/pyanom
|
e442bff70a4d1880a9a698c020287edf1933d498
|
[
"MIT"
] | null | null | null |
from pyanom import *
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5090995e0e06b2fbc3723588e780d6037fe90072
| 34
|
py
|
Python
|
fabenv/__init__.py
|
GlitchCorp/fabenv
|
bfe2cdef5b08fa7853f1e2a418d2be618d26eb7c
|
[
"MIT"
] | null | null | null |
fabenv/__init__.py
|
GlitchCorp/fabenv
|
bfe2cdef5b08fa7853f1e2a418d2be618d26eb7c
|
[
"MIT"
] | null | null | null |
fabenv/__init__.py
|
GlitchCorp/fabenv
|
bfe2cdef5b08fa7853f1e2a418d2be618d26eb7c
|
[
"MIT"
] | null | null | null |
from .virtualenv import virtualenv
| 34
| 34
| 0.882353
| 4
| 34
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
50c562652407c50fe932aef9605c0ba2ffe4e9c7
| 19,657
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_execute_schedule.py
|
jmswaney/dagster
|
510080abc541250a4b74f6a0ada4484d67d5e037
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_execute_schedule.py
|
jmswaney/dagster
|
510080abc541250a4b74f6a0ada4484d67d5e037
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_execute_schedule.py
|
jmswaney/dagster
|
510080abc541250a4b74f6a0ada4484d67d5e037
|
[
"Apache-2.0"
] | null | null | null |
import time
import uuid
import pytest
from dagster_graphql.test.utils import define_context_for_repository_yaml, execute_dagster_graphql
from dagster import seven
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.scheduler.scheduler import ScheduleTickStatus
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.local_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.core.storage.schedules.sqlite import SqliteScheduleStorage
from dagster.utils import file_relative_path
from .execution_queries import START_SCHEDULED_EXECUTION_QUERY
from .utils import InMemoryRunLauncher
def get_instance(temp_dir):
    """Return an ephemeral DagsterInstance whose storages all live under ``temp_dir``."""
    storage_kwargs = {
        'local_artifact_storage': LocalArtifactStorage(temp_dir),
        'run_storage': InMemoryRunStorage(),
        'event_storage': InMemoryEventLogStorage(),
        'schedule_storage': SqliteScheduleStorage.from_local(temp_dir),
        'compute_log_manager': NoOpComputeLogManager(temp_dir),
    }
    return DagsterInstance(instance_type=InstanceType.EPHEMERAL, **storage_kwargs)
def get_instance_with_launcher(temp_dir):
    """Like ``get_instance``, but with an in-memory run launcher attached so
    tests can exercise the launch (rather than start) code path."""
    test_queue = InMemoryRunLauncher()
    storage_kwargs = {
        'local_artifact_storage': LocalArtifactStorage(temp_dir),
        'run_storage': InMemoryRunStorage(),
        'event_storage': InMemoryEventLogStorage(),
        'schedule_storage': SqliteScheduleStorage.from_local(temp_dir),
        'compute_log_manager': NoOpComputeLogManager(temp_dir),
        'run_launcher': test_queue,
    }
    return DagsterInstance(instance_type=InstanceType.EPHEMERAL, **storage_kwargs)
def test_basic_start_scheduled_execution():
    """Starting the hourly schedule succeeds, yields a valid run id for the
    expected pipeline, and tags the run with the schedule name."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'no_config_pipeline_hourly_schedule'},
        )
        assert not result.errors
        assert result.data
        # just test existence
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
        # the run id must parse as a UUID
        assert uuid.UUID(result.data['startScheduledExecution']['run']['runId'])
        assert (
            result.data['startScheduledExecution']['run']['pipeline']['name']
            == 'no_config_pipeline'
        )
        # the run is tagged with the schedule that launched it
        assert any(
            tag['key'] == 'dagster/schedule_name'
            and tag['value'] == 'no_config_pipeline_hourly_schedule'
            for tag in result.data['startScheduledExecution']['run']['tags']
        )
def test_basic_start_scheduled_execution_with_run_launcher():
    """Same as the basic test, but with a run launcher configured the
    GraphQL result type is LaunchPipelineExecutionSuccess instead."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance_with_launcher(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'no_config_pipeline_hourly_schedule'},
        )
        assert not result.errors
        assert result.data
        # just test existence
        assert (
            result.data['startScheduledExecution']['__typename'] == 'LaunchPipelineExecutionSuccess'
        )
        assert uuid.UUID(result.data['startScheduledExecution']['run']['runId'])
        assert (
            result.data['startScheduledExecution']['run']['pipeline']['name']
            == 'no_config_pipeline'
        )
        # the run is tagged with the schedule that launched it
        assert any(
            tag['key'] == 'dagster/schedule_name'
            and tag['value'] == 'no_config_pipeline_hourly_schedule'
            for tag in result.data['startScheduledExecution']['run']['tags']
        )
def test_basic_start_scheduled_execution_with_environment_dict_fn():
    """A schedule whose config comes from an environment-dict function still
    starts successfully and tags the run with the schedule name."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'no_config_pipeline_hourly_schedule_with_config_fn'},
        )
        assert not result.errors
        assert result.data
        # just test existence
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
        assert uuid.UUID(result.data['startScheduledExecution']['run']['runId'])
        assert (
            result.data['startScheduledExecution']['run']['pipeline']['name']
            == 'no_config_pipeline'
        )
        # the run is tagged with the schedule that launched it
        assert any(
            tag['key'] == 'dagster/schedule_name'
            and tag['value'] == 'no_config_pipeline_hourly_schedule_with_config_fn'
            for tag in result.data['startScheduledExecution']['run']['tags']
        )
def test_start_scheduled_execution_with_should_execute():
    """A schedule whose ``should_execute`` returns False is blocked rather
    than executed."""
    with seven.TemporaryDirectory() as tmpdir:
        graphql_context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'),
            instance=get_instance(tmpdir),
        )
        result = execute_dagster_graphql(
            graphql_context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'no_config_should_execute'},
        )
        assert not result.errors
        assert result.data
        payload = result.data['startScheduledExecution']
        assert payload['__typename'] == 'ScheduledExecutionBlocked'
def test_partition_based_execution():
    """A partition-based schedule runs against the last partition ('9'),
    carries partition/partition-set tags, and a second run selects the
    same last partition again."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context, START_SCHEDULED_EXECUTION_QUERY, variables={'scheduleName': 'partition_based'},
        )
        assert not result.errors
        assert result.data
        # just test existence
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
        assert uuid.UUID(result.data['startScheduledExecution']['run']['runId'])
        assert (
            result.data['startScheduledExecution']['run']['pipeline']['name']
            == 'no_config_pipeline'
        )
        tags = result.data['startScheduledExecution']['run']['tags']
        assert any(
            tag['key'] == 'dagster/schedule_name' and tag['value'] == 'partition_based'
            for tag in tags
        )
        assert any(tag['key'] == 'dagster/partition' and tag['value'] == '9' for tag in tags)
        assert any(
            tag['key'] == 'dagster/partition_set' and tag['value'] == 'scheduled_integer_partitions'
            for tag in tags
        )
        result_two = execute_dagster_graphql(
            context, START_SCHEDULED_EXECUTION_QUERY, variables={'scheduleName': 'partition_based'},
        )
        tags = result_two.data['startScheduledExecution']['run']['tags']
        # the last partition is selected on subsequent runs
        assert any(tag['key'] == 'dagster/partition' and tag['value'] == '9' for tag in tags)
def test_partition_based_custom_selector():
    """With a custom partition selector, the first run uses partition '9'
    and a subsequent run — seeing the prior run in storage — selects '8'."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'partition_based_custom_selector'},
        )
        assert not result.errors
        assert result.data
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
        assert uuid.UUID(result.data['startScheduledExecution']['run']['runId'])
        assert (
            result.data['startScheduledExecution']['run']['pipeline']['name']
            == 'no_config_pipeline'
        )
        tags = result.data['startScheduledExecution']['run']['tags']
        assert any(
            tag['key'] == 'dagster/schedule_name'
            and tag['value'] == 'partition_based_custom_selector'
            for tag in tags
        )
        assert any(tag['key'] == 'dagster/partition' and tag['value'] == '9' for tag in tags)
        assert any(
            tag['key'] == 'dagster/partition_set' and tag['value'] == 'scheduled_integer_partitions'
            for tag in tags
        )
        result_two = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'partition_based_custom_selector'},
        )
        tags = result_two.data['startScheduledExecution']['run']['tags']
        # get a different partition based on the subsequent run storage
        assert any(tag['key'] == 'dagster/partition' and tag['value'] == '8' for tag in tags)
def test_partition_based_decorator():
    """A schedule defined with the partition-based decorator starts a
    pipeline execution successfully."""
    with seven.TemporaryDirectory() as tmpdir:
        graphql_context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'),
            instance=get_instance(tmpdir),
        )
        result = execute_dagster_graphql(
            graphql_context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'partition_based_decorator'},
        )
        assert not result.errors
        assert result.data
        typename = result.data['startScheduledExecution']['__typename']
        assert typename == 'StartPipelineExecutionSuccess'
@pytest.mark.parametrize(
    'schedule_name',
    [
        'solid_subset_hourly_decorator',
        'solid_subset_daily_decorator',
        'solid_subset_monthly_decorator',
        'solid_subset_weekly_decorator',
    ],
)
def test_solid_subset_schedule_decorator(schedule_name):
    """Schedules built with solid-subset decorators execute only the
    selected solid, at every decorator cadence."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context, START_SCHEDULED_EXECUTION_QUERY, variables={'scheduleName': schedule_name},
        )
        assert not result.errors
        assert result.data
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
        # only the solid in the subset should have produced a start event
        execution_step_names = [
            log['step']['key']
            for log in result.data['startScheduledExecution']['run']['logs']['nodes']
            if log['__typename'] == 'ExecutionStepStartEvent'
        ]
        assert execution_step_names == ['return_foo.compute']
def test_partition_based_multi_mode_decorator():
    """A partition-based schedule on a multi-mode pipeline starts successfully."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'partition_based_multi_mode_decorator'},
        )
        assert not result.errors
        assert result.data
        assert (
            result.data['startScheduledExecution']['__typename'] == 'StartPipelineExecutionSuccess'
        )
# Tests for ticks and execution user error boundary
def test_tick_success():
    """A successful scheduled execution records exactly one SUCCESS tick
    carrying the schedule name, its cron string, an in-window timestamp,
    and a run id."""
    with seven.TemporaryDirectory() as temp_dir:
        instance = get_instance(temp_dir)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        repository = context.get_repository()
        schedule_handle = context.scheduler_handle
        schedule_def = schedule_handle.get_schedule_def_by_name(
            "no_config_pipeline_hourly_schedule"
        )
        start_time = time.time()
        execute_dagster_graphql(
            context, START_SCHEDULED_EXECUTION_QUERY, variables={'scheduleName': schedule_def.name},
        )
        ticks = instance.get_schedule_ticks_by_schedule(repository, schedule_def.name)
        assert len(ticks) == 1
        tick = ticks[0]
        assert tick.schedule_name == schedule_def.name
        assert tick.cron_schedule == schedule_def.cron_schedule
        # the tick timestamp falls between when we started and now
        assert tick.timestamp > start_time and tick.timestamp < time.time()
        assert tick.status == ScheduleTickStatus.SUCCESS
        assert tick.run_id
def test_tick_skip():
    """A schedule whose should_execute declines records a single SKIPPED tick."""
    with seven.TemporaryDirectory() as tmp:
        instance = get_instance(tmp)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'no_config_should_execute'},
        )
        ticks = instance.get_schedule_ticks_by_schedule(
            context.get_repository(), 'no_config_should_execute'
        )
        assert len(ticks) == 1
        assert ticks[0].status == ScheduleTickStatus.SKIPPED
def test_should_execute_scheduler_error():
    """An exception inside should_execute yields a FAILURE tick with its error."""
    with seven.TemporaryDirectory() as tmp:
        instance = get_instance(tmp)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'should_execute_error_schedule'},
        )
        ticks = instance.get_schedule_ticks_by_schedule(
            context.get_repository(), 'should_execute_error_schedule'
        )
        assert len(ticks) == 1
        (tick,) = ticks
        assert tick.status == ScheduleTickStatus.FAILURE
        assert tick.error
        expected = (
            "Error occurred during the execution should_execute for schedule "
            "should_execute_error_schedule"
        )
        assert expected in tick.error.message
def test_tags_scheduler_error():
    """An exception inside the schedule's tags_fn yields a FAILURE tick."""
    with seven.TemporaryDirectory() as tmp:
        instance = get_instance(tmp)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'tags_error_schedule'},
        )
        ticks = instance.get_schedule_ticks_by_schedule(
            context.get_repository(), 'tags_error_schedule'
        )
        assert len(ticks) == 1
        (tick,) = ticks
        assert tick.status == ScheduleTickStatus.FAILURE
        assert tick.error
        expected = (
            "Error occurred during the execution of tags_fn for schedule "
            "tags_error_schedule"
        )
        assert expected in tick.error.message
# NOTE(review): "enviornment" is a typo, but the name is kept so the test
# suite's collected test IDs do not change.
def test_enviornment_dict_scheduler_error():
    """An exception inside environment_dict_fn yields a FAILURE tick."""
    with seven.TemporaryDirectory() as tmp:
        instance = get_instance(tmp)
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'), instance=instance
        )
        execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'environment_dict_error_schedule'},
        )
        ticks = instance.get_schedule_ticks_by_schedule(
            context.get_repository(), 'environment_dict_error_schedule'
        )
        assert len(ticks) == 1
        (tick,) = ticks
        assert tick.status == ScheduleTickStatus.FAILURE
        assert tick.error
        expected = (
            "Error occurred during the execution of environment_dict_fn for schedule "
            "environment_dict_error_schedule"
        )
        assert expected in tick.error.message
def test_tagged_pipeline_schedule():
    """Tags declared on the pipeline appear on the scheduled run."""
    with seven.TemporaryDirectory() as tmp:
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'),
            instance=get_instance(tmp),
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'tagged_pipeline_schedule'},
        )
        assert not result.errors
        execution = result.data['startScheduledExecution']
        assert execution['__typename'] == 'StartPipelineExecutionSuccess'
        assert execution['run']['pipeline']['name'] == 'tagged_pipeline'
        tag_pairs = [(t['key'], t['value']) for t in execution['run']['tags']]
        assert ('foo', 'bar') in tag_pairs
def test_tagged_pipeline_override_schedule():
    """Schedule-level tags override pipeline-level tags with the same key."""
    with seven.TemporaryDirectory() as tmp:
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'),
            instance=get_instance(tmp),
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'tagged_pipeline_override_schedule'},
        )
        assert not result.errors
        execution = result.data['startScheduledExecution']
        assert execution['__typename'] == 'StartPipelineExecutionSuccess'
        assert execution['run']['pipeline']['name'] == 'tagged_pipeline'
        tag_pairs = [(t['key'], t['value']) for t in execution['run']['tags']]
        # The pipeline's own value must have been replaced by the schedule's.
        assert ('foo', 'bar') not in tag_pairs
        assert ('foo', 'notbar') in tag_pairs
def test_tagged_pipeline_scheduled_execution_with_run_launcher():
    """With a run launcher configured, scheduled execution launches (not starts) the run."""
    with seven.TemporaryDirectory() as tmp:
        context = define_context_for_repository_yaml(
            path=file_relative_path(__file__, '../repository.yaml'),
            instance=get_instance_with_launcher(tmp),
        )
        result = execute_dagster_graphql(
            context,
            START_SCHEDULED_EXECUTION_QUERY,
            variables={'scheduleName': 'tagged_pipeline_schedule'},
        )
        assert not result.errors
        assert result.data
        execution = result.data['startScheduledExecution']
        # Existence checks only: launcher path, valid run id, right pipeline, tag carried.
        assert execution['__typename'] == 'LaunchPipelineExecutionSuccess'
        assert uuid.UUID(execution['run']['runId'])
        assert execution['run']['pipeline']['name'] == 'tagged_pipeline'
        tag_pairs = [(t['key'], t['value']) for t in execution['run']['tags']]
        assert ('foo', 'bar') in tag_pairs
| 35.74
| 100
| 0.652083
| 1,920
| 19,657
| 6.334375
| 0.08125
| 0.037823
| 0.097681
| 0.071041
| 0.84542
| 0.833004
| 0.830538
| 0.820671
| 0.795675
| 0.788111
| 0
| 0.00095
| 0.250343
| 19,657
| 549
| 101
| 35.8051
| 0.824376
| 0.013278
| 0
| 0.629291
| 0
| 0
| 0.20261
| 0.119513
| 0
| 0
| 0
| 0
| 0.19222
| 1
| 0.043478
| false
| 0
| 0.034325
| 0.002288
| 0.08238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50ced8b324e4f4a1c78dca7beda3b54662b1429e
| 16,077
|
py
|
Python
|
tests/test_class2.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | 14
|
2020-01-24T21:52:51.000Z
|
2021-05-24T01:58:08.000Z
|
tests/test_class2.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | null | null | null |
tests/test_class2.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | 26
|
2020-03-29T20:17:29.000Z
|
2022-03-28T19:13:40.000Z
|
import os
import re
import pytest
from pathlib import Path
from utilities import subprocess_runner, remove_ansible_warnings
# Collateral playbooks exercised one-by-one by test_runner_collateral.
# Paths are relative to this test file's working directory.
TEST_CASES = [
    "../class2/collateral/cli_command/cli_command_1.yml",
    "../class2/collateral/eos_command/eos_example_1.yml",
    "../class2/collateral/eos_command/eos_example_2.yml",
    "../class2/collateral/eos_command/eos_example_3.yml",
    "../class2/collateral/eos_command/eos_example_4.yml",
    "../class2/collateral/eos_command/eos_example_5.yml",
    "../class2/collateral/eos_command/eos_example_6.yml",
    "../class2/collateral/hostvars/test1/simple_pb1.yml",
    "../class2/collateral/hostvars/test1/simple_pb2.yml",
    "../class2/collateral/hostvars/test2/simple_pb2.yml",
    "../class2/collateral/ios_command/ios_example_1.yml",
    "../class2/collateral/ios_command/ios_example_2.yml",
    "../class2/collateral/ios_command/ios_example_3.yml",
    "../class2/collateral/ios_command/ios_example_4.yml",
    "../class2/collateral/ios_command/ios_example_5.yml",
    "../class2/collateral/ios_command/ios_example_6.yml",
    "../class2/collateral/ios_command/ios_example_7.yml",
    "../class2/collateral/ios_command/ios_example_8.yml",
    "../class2/collateral/modules/my_modules_1.yml",
    "../class2/collateral/priv_escalation/enable.yml",
    "../class2/collateral/priv_escalation/enable_2.yml",
    "../class2/collateral/setfact/simple_pb.yml",
    # Deliberately disabled; reason not recorded here — confirm before re-enabling.
    # "../class2/collateral/setfact/test_prompt.yml",
    "../class2/collateral/variables/simple_pb.yml",
    "../class2/collateral/variables/simple_pb_1.yml",
]
@pytest.mark.parametrize("test_case", TEST_CASES)
def test_runner_collateral(test_case):
    """Each collateral playbook runs from its own directory with rc 0 and no stderr."""
    playbook = Path(test_case)
    out, err, rc = subprocess_runner(
        ["ansible-playbook", playbook.name], playbook.parents[0]
    )
    err = remove_ansible_warnings(err)
    assert rc == 0
    assert err == ""
def test_class2_ex1a():
    """exercise1a: device IP debugged three times and a clean three-task recap."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise1a.yml"],
        exercise_dir="../class2/exercises/exercise1",
    )
    assert out.count("10.220.88.32") == 3
    recap = (
        "arista5 : ok=3 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0"
    )
    assert recap in out
    assert remove_ansible_warnings(err) == ""
    assert rc == 0
def test_class2_ex1b():
    """exercise1b: host var resolved and a clean five-task recap."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise1b.yml"],
        exercise_dir="../class2/exercises/exercise1",
    )
    assert out.count("10.220.88.32") == 3
    assert '"ansible_host": "arista5.lasthop.io"' in out
    recap = (
        "arista5 : ok=5 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0"
    )
    assert recap in out
    assert remove_ansible_warnings(err).strip() == ""
    assert rc == 0
def test_class2_ex1c():
    """exercise1c: group/host vars resolve and the playbook runs cleanly.

    Fixes two latent defects: the ``desired_eos_version`` assert was a bare
    string literal (always truthy — it never checked the output), and the
    ``ansible_host`` assertion was duplicated.
    """
    base_path = "../class2/exercises/exercise1/exercise1c"
    cmd_list = ["ansible-playbook", "exercise1c.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert std_out.count("10.220.88.32") == 3
    assert '"ansible_host": "arista5.lasthop.io"' in std_out
    assert '"ansible_network_os": "eos"' in std_out
    # Was `assert '...'` with no `in std_out` — a no-op; now actually checked.
    assert '"desired_eos_version": "4.18.3"' in std_out
    assert (
        "arista5 : ok=6 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0" in std_out
    )
    std_err = remove_ansible_warnings(std_err)
    assert std_err.strip() == ""
    assert return_code == 0
def test_class2_ex1d():
    """exercise1d: var precedence resolves to 4.21.1 and the playbook runs cleanly.

    Fixes two latent defects: the ``desired_eos_version`` assert was a bare
    string literal (always truthy — it never checked the output), and the
    ``ansible_host`` assertion was duplicated.
    """
    base_path = "../class2/exercises/exercise1/exercise1d"
    cmd_list = ["ansible-playbook", "exercise1d.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert std_out.count("10.220.88.32") == 3
    assert '"ansible_host": "arista5.lasthop.io"' in std_out
    assert '"ansible_network_os": "eos"' in std_out
    # Was `assert '...'` with no `in std_out` — a no-op; now actually checked.
    assert '"desired_eos_version": "4.21.1"' in std_out
    assert (
        "arista5 : ok=6 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0" in std_out
    )
    std_err = remove_ansible_warnings(std_err)
    assert std_err.strip() == ""
    assert return_code == 0
def test_class2_ex1e():
    """exercise1e: extra vars resolve and the playbook runs cleanly.

    Fixes three latent defects: the ``desired_eos_version`` and
    ``device_hostname`` asserts were bare string literals (always truthy —
    they never checked the output), and the ``ansible_host`` assertion was
    duplicated.
    """
    base_path = "../class2/exercises/exercise1/exercise1e"
    cmd_list = ["ansible-playbook", "exercise1e.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert std_out.count("10.220.88.32") == 3
    assert '"ansible_host": "arista5.lasthop.io"' in std_out
    assert '"ansible_network_os": "eos"' in std_out
    # Both were `assert '...'` with no `in std_out` — no-ops; now checked.
    assert '"desired_eos_version": "4.21.1"' in std_out
    assert '"device_hostname": "arista5.lab.io"' in std_out
    assert (
        "arista5 : ok=8 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0" in std_out
    )
    std_err = remove_ansible_warnings(std_err)
    assert std_err.strip() == ""
    assert return_code == 0
def test_class2_ex2a():
    """exercise2a: every cisco host reports ASN 65001 with a clean recap."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise2a.yml"],
        exercise_dir="../class2/exercises/exercise2/exercise2a",
    )
    for host in ("cisco1", "cisco2", "cisco5", "cisco6"):
        assert "The ASN for host {} is 65001".format(host) in out
    recaps = (
        "cisco1 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco2 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco5 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco6 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex2b():
    """exercise2b: cisco5 overrides the group ASN (65535); all recaps clean."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise2b.yml"],
        exercise_dir="../class2/exercises/exercise2/exercise2b",
    )
    expected_asns = (
        ("cisco1", "65001"),
        ("cisco2", "65001"),
        ("cisco5", "65535"),
        ("cisco6", "65001"),
    )
    for host, asn in expected_asns:
        assert "The ASN for host {} is {}".format(host, asn) in out
    recaps = (
        "cisco1 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco2 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco5 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco6 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex2c():
    """exercise2c: each host reports its ASN and router-id; all recaps clean."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise2c.yml"],
        exercise_dir="../class2/exercises/exercise2/exercise2c",
    )
    expected = (
        ("cisco1", "65001", "1.1.1.1"),
        ("cisco2", "65001", "2.2.2.2"),
        ("cisco5", "65535", "5.5.5.5"),
        ("cisco6", "65001", "6.6.6.6"),
    )
    for host, asn, rid in expected:
        msg = "The ASN for host {} is {}, the router-id is {}".format(host, asn, rid)
        assert msg in out
    recaps = (
        "cisco1 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco2 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco5 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "cisco6 : ok=1 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex3a():
    """exercise3a: NXOS version string captured from both nxos devices."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise3a.yml"],
        exercise_dir="../class2/exercises/exercise3",
    )
    assert out.count('" NXOS: version 9.2(3)",') == 2
    recaps = (
        "nxos1 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "nxos2 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex3b():
    """exercise3b: version and FHRP flag output captured from both devices."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise3b.yml"],
        exercise_dir="../class2/exercises/exercise3",
    )
    assert out.count('" NXOS: version 9.2(3)",') == 2
    flag_line = '"Flags: * - Adjacencies learnt on non-active FHRP router"'
    assert out.count(flag_line) == 2
    recaps = (
        "nxos1 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "nxos2 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex3c():
    """exercise3c: entry-count footer captured from both devices."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise3c.yml"],
        exercise_dir="../class2/exercises/exercise3",
    )
    assert out.count("Total entries displayed:") == 2
    recaps = (
        "nxos1 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "nxos2 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex3d():
    """exercise3d: playbook run with the SSH password passed as an extra var."""
    # Password comes from the environment; keeps the secret out of the repo.
    password_arg = f"ansible_ssh_pass={os.environ['ANSIBLE_PASSWORD']}"
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise3d.yml", "-e", password_arg],
        exercise_dir="../class2/exercises/exercise3/exercise3d",
    )
    assert out.count("Total entries displayed: ") == 2
    recaps = (
        "nxos1 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "nxos2 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex4a():
    """exercise4: prompt-driven buffer clear confirmed twice on cisco6."""
    # Password comes from the environment; keeps the secret out of the repo.
    password_arg = f"ansible_ssh_pass={os.environ['ANSIBLE_PASSWORD']}"
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise4.yml", "-e", password_arg],
        exercise_dir="../class2/exercises/exercise4",
    )
    assert out.count("Clear logging buffer [confirm]") == 2
    recap = (
        "cisco6 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0"
    )
    assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex5a():
    """exercise5a: both vmx devices show fxp0.0 up/up with the expected mgmt IP.

    Fix: the IP and interface dots were unescaped regex metacharacters
    (`.` matches any character), so the patterns could match unintended
    output; they are now escaped to match literal dots.
    """
    base_path = "../class2/exercises/exercise5"
    cmd_list = ["ansible-playbook", "exercise5a.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert re.search(r"fxp0\.0\s+up\s+up.*172\.30\.0\.221/24", std_out)
    assert re.search(r"fxp0\.0\s+up\s+up.*172\.30\.0\.156/24", std_out)
    assert re.search(r"^vmx1.*ok=2.*failed=0", std_out, flags=re.M)
    assert re.search(r"^vmx2.*ok=2.*failed=0", std_out, flags=re.M)
    assert std_err == ""
    assert return_code == 0
def test_class2_ex5b():
    """exercise5b: both vmx devices show fxp0.0 with the expected mgmt IP.

    Fix: escaped the literal dots in the IP/interface regexes, which
    previously matched any character.
    """
    base_path = "../class2/exercises/exercise5"
    cmd_list = ["ansible-playbook", "exercise5b.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert re.search(r"fxp0\.0.*172\.30\.0\.221/24", std_out)
    assert re.search(r"fxp0\.0.*172\.30\.0\.156/24", std_out)
    assert re.search(r"^vmx1.*ok=2.*failed=0", std_out, flags=re.M)
    assert re.search(r"^vmx2.*ok=2.*failed=0", std_out, flags=re.M)
    assert std_err == ""
    assert return_code == 0
def test_class2_ex5c():
    """exercise5c: both vmx devices report their primary mgmt IP; three tasks ok.

    Fix: escaped the literal dots in the IP regexes, which previously
    matched any character.
    """
    base_path = "../class2/exercises/exercise5"
    cmd_list = ["ansible-playbook", "exercise5c.yml"]
    std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
    assert re.search(r"Primary IP.*172\.30\.0\.221/24", std_out)
    assert re.search(r"Primary IP.*172\.30\.0\.156/24", std_out)
    assert re.search(r"^vmx1.*ok=3.*failed=0", std_out, flags=re.M)
    assert re.search(r"^vmx2.*ok=3.*failed=0", std_out, flags=re.M)
    assert std_err == ""
    assert return_code == 0
def test_class2_ex6a():
    """exercise6a: ARP table header appears eight times; clean recaps on all aristas."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise6a.yml"],
        exercise_dir="../class2/exercises/exercise6",
    )
    assert out.count("Address Age (min) Hardware Addr Interface") == 8
    recaps = (
        "arista5 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista6 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista7 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista8 : ok=2 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
def test_class2_ex6b():
    """exercise6b: all four arista devices finish four tasks with clean recaps."""
    out, err, rc = subprocess_runner(
        ["ansible-playbook", "exercise6b.yml"],
        exercise_dir="../class2/exercises/exercise6",
    )
    recaps = (
        "arista5 : ok=4 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista6 : ok=4 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista7 : ok=4 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
        "arista8 : ok=4 changed=0 unreachable=0 failed=0 "
        "skipped=0 rescued=0 ignored=0",
    )
    for recap in recaps:
        assert recap in out
    assert err == ""
    assert rc == 0
| 38.553957
| 89
| 0.611681
| 2,185
| 16,077
| 4.297025
| 0.089245
| 0.063266
| 0.072851
| 0.076046
| 0.870806
| 0.8396
| 0.810203
| 0.760358
| 0.747683
| 0.688252
| 0
| 0.059614
| 0.268582
| 16,077
| 416
| 90
| 38.646635
| 0.738838
| 0.002923
| 0
| 0.573864
| 0
| 0.005682
| 0.48983
| 0.139568
| 0
| 0
| 0
| 0
| 0.346591
| 1
| 0.053977
| false
| 0.005682
| 0.014205
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ba02fb9cd61810e4a5811e167113519e5da23f11
| 134
|
py
|
Python
|
automl_alex/__init__.py
|
chrinide/AutoML_Alex
|
961fb2b4ff0864f6a0c35b4fcbd2fbe666fbc5e3
|
[
"MIT"
] | 1
|
2020-07-20T14:32:14.000Z
|
2020-07-20T14:32:14.000Z
|
automl_alex/__init__.py
|
chrinide/AutoML_Alex
|
961fb2b4ff0864f6a0c35b4fcbd2fbe666fbc5e3
|
[
"MIT"
] | null | null | null |
automl_alex/__init__.py
|
chrinide/AutoML_Alex
|
961fb2b4ff0864f6a0c35b4fcbd2fbe666fbc5e3
|
[
"MIT"
] | null | null | null |
from .models import *
from .automl_alex import *
from .databunch import *
from .encoders import *
from .__version__ import __version__
| 26.8
| 36
| 0.791045
| 17
| 134
| 5.705882
| 0.470588
| 0.412371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141791
| 134
| 5
| 36
| 26.8
| 0.843478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba05295b7fe9a6bb02da83ab59cc6122ea9621b3
| 122
|
py
|
Python
|
mneia_admin_backend/models/person_work_relationship.py
|
mneia-gr/mneia-admin-backend
|
ab1c1f55f599d8d1919930717c979c3973c821d0
|
[
"CC0-1.0"
] | null | null | null |
mneia_admin_backend/models/person_work_relationship.py
|
mneia-gr/mneia-admin-backend
|
ab1c1f55f599d8d1919930717c979c3973c821d0
|
[
"CC0-1.0"
] | null | null | null |
mneia_admin_backend/models/person_work_relationship.py
|
mneia-gr/mneia-admin-backend
|
ab1c1f55f599d8d1919930717c979c3973c821d0
|
[
"CC0-1.0"
] | null | null | null |
from mneia_admin_backend.models import abstract
class PersonWorkRelationship(abstract.PersonWorkRelationship):
    """Concrete person-work relationship model.

    All behavior is inherited from the abstract base; this subclass exists
    so the model is registered as a concrete (non-abstract) class.
    """

    pass
| 20.333333
| 62
| 0.852459
| 12
| 122
| 8.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106557
| 122
| 5
| 63
| 24.4
| 0.93578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ba177084b405f2ea388bfbbe13c2c4df85cec1bf
| 32
|
py
|
Python
|
modules/datasets/__init__.py
|
pgmikhael/NutriNet
|
f11a0e013479b25a010df4c65f4ef16aa74963d8
|
[
"Apache-2.0"
] | null | null | null |
modules/datasets/__init__.py
|
pgmikhael/NutriNet
|
f11a0e013479b25a010df4c65f4ef16aa74963d8
|
[
"Apache-2.0"
] | null | null | null |
modules/datasets/__init__.py
|
pgmikhael/NutriNet
|
f11a0e013479b25a010df4c65f4ef16aa74963d8
|
[
"Apache-2.0"
] | null | null | null |
import modules.datasets.recipes
| 16
| 31
| 0.875
| 4
| 32
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 32
| 1
| 32
| 32
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e84f97a4ddc8376f12722aba7512c09e6cece626
| 2,333
|
py
|
Python
|
tests/test_query_snapshot.py
|
tellor-io/telliot-core
|
e2b6cb3486e1aa796bd4d14147bd18d300191492
|
[
"MIT"
] | 9
|
2021-12-15T07:03:34.000Z
|
2022-03-30T20:16:45.000Z
|
tests/test_query_snapshot.py
|
tellor-io/telliot-core
|
e2b6cb3486e1aa796bd4d14147bd18d300191492
|
[
"MIT"
] | 76
|
2021-11-11T10:06:11.000Z
|
2022-03-30T18:50:48.000Z
|
tests/test_query_snapshot.py
|
tellor-io/telliot-core
|
e2b6cb3486e1aa796bd4d14147bd18d300191492
|
[
"MIT"
] | 7
|
2021-12-17T03:39:23.000Z
|
2022-03-29T08:53:43.000Z
|
""" Unit tests for Snapshot queries
Copyright (c) 2021-, Tellor Development Community
Distributed under the terms of the MIT License.
"""
from eth_abi import decode_abi
from telliot_core.queries.snapshot import Snapshot
def test_constructor():
    """Validate snapshot query.

    Pins the exact ABI encoding of a Snapshot query: query_data must be the
    ABI tuple ("Snapshot", encoded proposal_id), and query_id must equal the
    expected hash hex digest.
    """
    q = Snapshot(proposal_id="QmbZ6cYVvfoKvkDX14jRcN86z6bfV135npUfhxmENjHnQ1")
    # Golden value: ABI-encoded ("Snapshot", abi_encode(proposal_id)).
    exp = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Snapshot\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00.QmbZ6cYVvfoKvkDX14jRcN86z6bfV135npUfhxmENjHnQ1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"  # noqa: E501
    assert q.query_data == exp
    # Decode the outer (type, params) tuple and then the inner proposal_id.
    query_type, encoded_param_vals = decode_abi(["string", "bytes"], q.query_data)
    assert query_type == "Snapshot"
    proposal_id = decode_abi(["string"], encoded_param_vals)[0]
    assert isinstance(proposal_id, str)
    assert proposal_id == "QmbZ6cYVvfoKvkDX14jRcN86z6bfV135npUfhxmENjHnQ1"
    # Golden value: hex digest of the query id hash.
    exp = "6ec98c95cf3aec7866c0fd1617c62e779a494ed49e689f578e14a5a0a0d99349"
    assert q.query_id.hex() == exp
def test_encode_decode_reported_val():
    """Ensure a reported vote-count value round-trips through encode/decode."""
    query = Snapshot(proposal_id="aDd6cYVvfoKvkDX14jRcN86z6bfV135npUfhxmENjHnQ1")
    # An array of values representing the amount of votes (uints) for each vote option
    vote_counts = [500, 10, 35]
    encoded = query.value_type.encode(vote_counts)
    assert isinstance(encoded, bytes)
    round_tripped = list(query.value_type.decode(encoded))
    assert isinstance(round_tripped, list)
    assert round_tripped[2] == 35
| 53.022727
| 1,008
| 0.752679
| 391
| 2,333
| 4.409207
| 0.207161
| 0.776102
| 1.138051
| 1.482599
| 0.400232
| 0.400232
| 0.400232
| 0.400232
| 0.400232
| 0.400232
| 0
| 0.265123
| 0.093013
| 2,333
| 43
| 1,009
| 54.255814
| 0.549622
| 0.125161
| 0
| 0
| 0
| 0.047619
| 0.596934
| 0.584075
| 0
| 0
| 0
| 0
| 0.380952
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e88f6cd7ddfdc87118e402ea43ef2e2881452423
| 35
|
py
|
Python
|
logiq/creations/__init__.py
|
Bnz-0/logiq
|
5b7c4cf894f00aa5648192f9c4bece6a45c9f894
|
[
"MIT"
] | 1
|
2019-12-04T13:45:14.000Z
|
2019-12-04T13:45:14.000Z
|
logiq/creations/__init__.py
|
Bnz-0/logiq
|
5b7c4cf894f00aa5648192f9c4bece6a45c9f894
|
[
"MIT"
] | null | null | null |
logiq/creations/__init__.py
|
Bnz-0/logiq
|
5b7c4cf894f00aa5648192f9c4bece6a45c9f894
|
[
"MIT"
] | null | null | null |
from . import algorithms, protocols
| 35
| 35
| 0.828571
| 4
| 35
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e89056cd12207811e2589eb66cf04629a00e081b
| 7,124
|
py
|
Python
|
examples/pytorch/graphsaint/config.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
examples/pytorch/graphsaint/config.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
examples/pytorch/graphsaint/config.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
# GraphSAINT hyper-parameter presets, keyed '<dataset>_<suffix>' where the
# suffix mirrors each entry's 'sampler' field: n -> 'node', e -> 'edge',
# rw -> random walk. All entries share the same key set, except that only
# the amazon_e/amazon_rw presets carry an explicit 'gpu' key —
# presumably the consumer defaults it elsewhere; confirm against the trainer.
CONFIG={
    'ppi_n':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'ppi', 'dropout': 0, 'edge_budget': 4000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 6000,
        'num_subg': 50, 'num_roots': 3000, 'sampler': 'node', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'ppi_e':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'ppi', 'dropout': 0.1, 'edge_budget': 4000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 6000,
        'num_subg': 50, 'num_roots': 3000, 'sampler': 'edge', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'ppi_rw':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'ppi', 'dropout': 0.1, 'edge_budget': 4000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 6000,
        'num_subg': 50, 'num_roots': 3000, 'sampler': 'rw', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'flickr_n':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'flickr', 'dropout': 0.2, 'edge_budget': 6000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 256, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 25, 'num_roots': 6000, 'sampler': 'node', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': False
    },
    'flickr_e':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'flickr', 'dropout': 0.2, 'edge_budget': 6000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 256, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 25, 'num_roots': 6000, 'sampler': 'edge', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': False
    },
    'flickr_rw':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'flickr', 'dropout': 0.2, 'edge_budget': 6000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 50, 'n_hidden': 256, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 25, 'num_roots': 6000, 'sampler': 'rw', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 0,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': False
    },
    'reddit_n':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'reddit', 'dropout': 0.1, 'edge_budget': 4000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 20, 'n_hidden': 128, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 50, 'num_roots': 3000, 'sampler': 'node', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'reddit_e':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'reddit', 'dropout': 0.1, 'edge_budget': 6000, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 20, 'n_hidden': 128, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 50, 'num_roots': 3000, 'sampler': 'edge', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'reddit_rw':
    {
        'aggr': 'concat', 'arch': '1-0-1-0', 'dataset': 'reddit', 'dropout': 0.1, 'edge_budget': 6000, 'length': 4,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 10, 'n_hidden': 128, 'no_batch_norm': False, 'node_budget': 8000,
        'num_subg': 50, 'num_roots': 200, 'sampler': 'rw', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'yelp_n':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'yelp', 'dropout': 0.1, 'edge_budget': 6000, 'length': 4,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 10, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 5000,
        'num_subg': 50, 'num_roots': 200, 'sampler': 'node', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'yelp_e':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'yelp', 'dropout': 0.1, 'edge_budget': 2500, 'length': 4,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 10, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 5000,
        'num_subg': 50, 'num_roots': 200, 'sampler': 'edge', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'yelp_rw':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'yelp', 'dropout': 0.1, 'edge_budget': 2500, 'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 10, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 5000,
        'num_subg': 50, 'num_roots': 1250, 'sampler': 'rw', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 8,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'amazon_n':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'amazon', 'dropout': 0.1, 'edge_budget': 2500, 'length': 4,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 5, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 4500,
        'num_subg': 50, 'num_roots': 200, 'sampler': 'node', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 4,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    },
    'amazon_e':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'amazon', 'dropout': 0.1, 'edge_budget': 2000, 'gpu': 0,'length': 4,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 10, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 5000,
        'num_subg': 50, 'num_roots': 200, 'sampler': 'edge', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 20,
        'num_subg_sampler': 5000, 'batch_size_sampler': 50, 'num_workers': 26, 'full': True
    },
    'amazon_rw':
    {
        'aggr': 'concat', 'arch': '1-1-0', 'dataset': 'amazon', 'dropout': 0.1, 'edge_budget': 2500, 'gpu': 0,'length': 2,
        'log_dir': 'none', 'lr': 0.01, 'n_epochs': 5, 'n_hidden': 512, 'no_batch_norm': False, 'node_budget': 5000,
        'num_subg': 50, 'num_roots': 1500, 'sampler': 'rw', 'use_val': True, 'val_every': 1, 'num_workers_sampler': 4,
        'num_subg_sampler': 10000, 'batch_size_sampler': 200, 'num_workers': 8, 'full': True
    }
}
| 57.918699
| 122
| 0.578327
| 1,030
| 7,124
| 3.723301
| 0.065049
| 0.054759
| 0.054759
| 0.05867
| 0.971578
| 0.971578
| 0.971578
| 0.970795
| 0.967666
| 0.96558
| 0
| 0.101916
| 0.194273
| 7,124
| 122
| 123
| 58.393443
| 0.566202
| 0
| 0
| 0.28972
| 0
| 0
| 0.479854
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8a065657640fc54ef973a1d4be400f0b0a0795c
| 48
|
py
|
Python
|
torch2trt/contrib/qat/__init__.py
|
PogChamper/torch2trt
|
43b12627ec0de4d212efb6d02b07570205085ccc
|
[
"MIT"
] | 3,363
|
2019-06-21T04:43:02.000Z
|
2022-03-31T20:08:31.000Z
|
torch2trt/contrib/qat/__init__.py
|
maronuu/torch2trt
|
311f328cd45799ad8d72f1bebcc818d71c301f62
|
[
"MIT"
] | 592
|
2019-06-24T08:25:55.000Z
|
2022-03-31T06:37:37.000Z
|
torch2trt/contrib/qat/__init__.py
|
maronuu/torch2trt
|
311f328cd45799ad8d72f1bebcc818d71c301f62
|
[
"MIT"
] | 606
|
2019-06-23T04:16:38.000Z
|
2022-03-31T09:22:15.000Z
|
from .converters import *
from .layers import *
| 16
| 25
| 0.75
| 6
| 48
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 26
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.