Schema (113 columns; field: dtype):

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
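The listing above is the flattened column schema: 113 fields pairing repository metadata (the max_stars/max_issues/max_forks blocks), the raw file content, and two banks of qsc_* quality signals (a *_quality_signal bank of raw scores, and an unsuffixed bank that holds only 0/1/null values in the rows below). A minimal sketch of how one might inspect a shard with this schema, assuming it is stored as a local Parquet file (the file name is hypothetical):

    # Minimal inspection sketch; "shard.parquet" is a hypothetical local path.
    import pandas as pd

    df = pd.read_parquet("shard.parquet")
    print(df.dtypes)                           # should mirror the schema above
    row = df.iloc[0]
    print(row["max_stars_repo_name"], row["size"], row["lang"])
    print(row["content"][:120])                # first 120 characters of the file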
Example rows:

Row 1
  hexsha: b666301f2e73ca7da7fa6cb1deb62ced6adcb6c0
  size: 160    ext: py    lang: Python
  max_stars_repo:  path=codesync/codesyncd/__init__.py  name=codesyncapp/codesync
                   head_hexsha=6521291fbcc6ccbbe17a07bf24d76a45f9e88d2c  licenses=["MIT"]
                   max_stars_count=1  stars_event=2022-02-09T23:53:40.000Z .. 2022-02-09T23:53:40.000Z
  max_issues_repo: same path, name, head_hexsha, licenses as max_stars_repo
                   max_issues_count=1  issues_event=2021-08-03T12:29:01.000Z .. 2021-08-03T12:29:01.000Z
  max_forks_repo:  same path, name, head_hexsha, licenses as max_stars_repo
                   max_forks_count=null  forks_event=null .. null
  content:
    """ Init process to sync the repo/branch """
    from .utils import *
    from .populate_buffer import *
    from .handle_buffer import *
    from .codesyncd import run_daemon
  avg_line_length: 20    max_line_length: 36    alphanum_fraction: 0.7625
  quality signals (*_quality_signal):
    num_words=23  num_chars=160  mean_word_length=5.173913  frac_words_unique=0.695652
    frac_chars_top_2grams=0.252101  frac_chars_top_3grams=0.268908  frac_chars_top_4grams=0
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0.15
    size_file_byte=160  num_lines=7  num_chars_line_max=37  num_chars_line_mean=22.857143
    frac_chars_alphabet=0.875  frac_chars_comments=0.225  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=0
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=1  frac_lines_func_ratio=0  cate_var_zero=true  frac_lines_pass=0
                frac_lines_import=1  frac_lines_simplefunc=0  score_lines_no_logic=1  frac_lines_print=0
  qsc_code_* (unsuffixed copies):
    num_words=1  num_chars=0  mean_word_length=0  frac_words_unique=null
    frac_chars_top_2grams=1  frac_chars_top_3grams=1  frac_chars_top_4grams=0
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0
    size_file_byte=0  num_lines=1  num_chars_line_max=0  num_chars_line_mean=0
    frac_chars_alphabet=0  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=null
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=0  frac_lines_func_ratio=0  cate_var_zero=1  frac_lines_pass=0
                frac_lines_import=1  frac_lines_simplefunc=0  score_lines_no_logic=1  frac_lines_print=0
  effective: 0    hits: 7
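Row 1's top-level statistics follow directly from content. A sketch of the obvious definitions (my reconstruction, not the dataset's exact code; newline and trailing-byte handling may differ slightly):

    # Hedged reconstruction of avg_line_length, max_line_length, alphanum_fraction.
    def basic_stats(content: str):
        lines = content.split("\n")
        avg_line_length = sum(len(line) for line in lines) / len(lines)
        max_line_length = max(len(line) for line in lines)
        alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
        return avg_line_length, max_line_length, alphanum_fraction

For Row 1 this reading is consistent with alphanum_fraction = 0.7625 (122 of 160 bytes alphanumeric); avg_line_length = 20 = 160/8 suggests the divisor counts a trailing newline as an extra line.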
Row 2
  hexsha: b6b4b2fee8b0546303efa96d19bc4cc7b32c4ca8
  size: 3588    ext: py    lang: Python
  max_stars_repo:  path=tests/memory_threshold_check_test.py  name=liuh-80/sonic-utilities
                   head_hexsha=3d3c89bd75e3c70881c64e2a59043177c56111b4  licenses=["Apache-2.0"]
                   max_stars_count=null  stars_event=null .. null
  max_issues_repo: same path, name, head_hexsha, licenses  max_issues_count=null  issues_event=null .. null
  max_forks_repo:  same path, name, head_hexsha, licenses  max_forks_count=null  forks_event=null .. null
  content:
    import os
    import sys
    import pytest
    from unittest import mock

    from .mock_tables import dbconnector
    from utilities_common.general import load_module_from_source

    test_path = os.path.dirname(os.path.abspath(__file__))
    modules_path = os.path.dirname(test_path)
    scripts_path = os.path.join(modules_path, 'scripts')
    sys.path.insert(0, scripts_path)

    memory_threshold_check_path = os.path.join(scripts_path, 'memory_threshold_check.py')
    memory_threshold_check = load_module_from_source('memory_threshold_check.py', memory_threshold_check_path)

    @pytest.fixture()
    def setup_dbs_regular_mem_usage():
        cfg_db = dbconnector.dedicated_dbs.get('CONFIG_DB')
        state_db = dbconnector.dedicated_dbs.get('STATE_DB')
        dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(test_path, 'memory_threshold_check', 'config_db')
        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(test_path, 'memory_threshold_check', 'state_db')
        yield
        dbconnector.dedicated_dbs['CONFIG_DB'] = cfg_db
        dbconnector.dedicated_dbs['STATE_DB'] = state_db

    @pytest.fixture()
    def setup_dbs_telemetry_high_mem_usage():
        memory_threshold_check.MemoryStats.get_sys_memory_stats = mock.Mock(return_value={'MemAvailable': 10000000, 'MemTotal': 20000000})
        cfg_db = dbconnector.dedicated_dbs.get('CONFIG_DB')
        state_db = dbconnector.dedicated_dbs.get('STATE_DB')
        dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(test_path, 'memory_threshold_check', 'config_db')
        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(test_path, 'memory_threshold_check', 'state_db_2')
        yield
        dbconnector.dedicated_dbs['CONFIG_DB'] = cfg_db
        dbconnector.dedicated_dbs['STATE_DB'] = state_db

    @pytest.fixture()
    def setup_dbs_swss_high_mem_usage():
        memory_threshold_check.MemoryStats.get_sys_memory_stats = mock.Mock(return_value={'MemAvailable': 10000000, 'MemTotal': 20000000})
        cfg_db = dbconnector.dedicated_dbs.get('CONFIG_DB')
        state_db = dbconnector.dedicated_dbs.get('STATE_DB')
        dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(test_path, 'memory_threshold_check', 'config_db')
        dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(test_path, 'memory_threshold_check', 'state_db_3')
        yield
        dbconnector.dedicated_dbs['CONFIG_DB'] = cfg_db
        dbconnector.dedicated_dbs['STATE_DB'] = state_db

    def test_memory_check_host_not_crossed(setup_dbs_regular_mem_usage):
        memory_threshold_check.MemoryStats.get_sys_memory_stats = mock.Mock(return_value={'MemAvailable': 1000000, 'MemTotal': 2000000})
        assert memory_threshold_check.main() == (memory_threshold_check.EXIT_SUCCESS, '')

    def test_memory_check_host_less_then_min_required(setup_dbs_regular_mem_usage):
        memory_threshold_check.MemoryStats.get_sys_memory_stats = mock.Mock(return_value={'MemAvailable': 1000, 'MemTotal': 2000000})
        assert memory_threshold_check.main() == (memory_threshold_check.EXIT_THRESHOLD_CROSSED, '')

    def test_memory_check_host_threshold_crossed(setup_dbs_regular_mem_usage):
        memory_threshold_check.MemoryStats.get_sys_memory_stats = mock.Mock(return_value={'MemAvailable': 2000000, 'MemTotal': 20000000})
        assert memory_threshold_check.main() == (memory_threshold_check.EXIT_THRESHOLD_CROSSED, '')

    def test_memory_check_telemetry_threshold_crossed(setup_dbs_telemetry_high_mem_usage):
        assert memory_threshold_check.main() == (memory_threshold_check.EXIT_THRESHOLD_CROSSED, 'telemetry')

    def test_memory_check_swss_threshold_crossed(setup_dbs_swss_high_mem_usage):
        assert memory_threshold_check.main() == (memory_threshold_check.EXIT_THRESHOLD_CROSSED, 'swss')
  avg_line_length: 49.833333    max_line_length: 134    alphanum_fraction: 0.801561
  quality signals (*_quality_signal):
    num_words=493  num_chars=3588  mean_word_length=5.373225  frac_words_unique=0.141988
    frac_chars_top_2grams=0.147225  frac_chars_top_3grams=0.1963  frac_chars_top_4grams=0.141563
    frac_chars_dupe_5grams..10grams=0.832012, 0.783314, 0.767459, 0.735749, 0.735749, 0.735749
    frac_chars_replacement_symbols=0  frac_chars_digital=0.022964  frac_chars_whitespace=0.089744
    size_file_byte=3588  num_lines=71  num_chars_line_max=135  num_chars_line_mean=50.535211
    frac_chars_alphabet=0.78812  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0.462963  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0.14214  frac_chars_long_word_length=0.050725  frac_lines_string_concat=0
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0.092593
    codepython: cate_ast=1  frac_lines_func_ratio=0.148148  cate_var_zero=false  frac_lines_pass=0
                frac_lines_import=0.111111  frac_lines_simplefunc=0  score_lines_no_logic=0.259259  frac_lines_print=0
  qsc_code_* (unsuffixed copies):
    num_words=0  num_chars=0  mean_word_length=0  frac_words_unique=null
    frac_chars_top_2grams=0  frac_chars_top_3grams=1  frac_chars_top_4grams=0
    frac_chars_dupe_5grams..10grams=1, 1, 1, 1, 1, 1
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0
    size_file_byte=0  num_lines=0  num_chars_line_max=0  num_chars_line_mean=0
    frac_chars_alphabet=0  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=null
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=0  frac_lines_func_ratio=0  cate_var_zero=0  frac_lines_pass=0
                frac_lines_import=0  frac_lines_simplefunc=0  score_lines_no_logic=0  frac_lines_print=0
  effective: 0    hits: 7
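Row 2's high frac_chars_dupe_5grams (0.832012) reflects its three near-identical pytest fixtures. A coverage-style sketch of this family of signals, in the spirit of RedPajama-style definitions (the tokenization here is an assumption, not the dataset's exact rule):

    from collections import Counter

    def frac_chars_dupe_ngrams(content: str, n: int = 5) -> float:
        # Share of word characters covered by word n-grams that occur more than once.
        words = content.split()
        if len(words) < n:
            return 0.0
        ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
        counts = Counter(ngrams)
        covered = [False] * len(words)
        for i, gram in enumerate(ngrams):
            if counts[gram] > 1:
                for j in range(i, i + n):
                    covered[j] = True
        total = sum(len(w) for w in words)
        dupe = sum(len(w) for w, hit in zip(words, covered) if hit)
        return dupe / total if total else 0.0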
Row 3
  hexsha: fcc1ce901476e289f38ff6b986990739c3eaccaa
  size: 107    ext: py    lang: Python
  max_stars_repo:  path=python/tjauto/cli_util.py  name=jtong/tjauto
                   head_hexsha=ebe3571f500ab187af84c626fccba055f95a992e  licenses=["MIT"]
                   max_stars_count=null  stars_event=null .. null
  max_issues_repo: same path, name, head_hexsha, licenses  max_issues_count=null  issues_event=null .. null
  max_forks_repo:  same path, name, head_hexsha, licenses  max_forks_count=null  forks_event=null .. null
  content:
    import os

    def goto_and_exec(location_path, next_cmd):
        os.system("cd "+location_path+" && "+next_cmd)
  avg_line_length: 17.833333    max_line_length: 50    alphanum_fraction: 0.71028
  quality signals (*_quality_signal):
    num_words=17  num_chars=107  mean_word_length=4.117647  frac_words_unique=0.705882
    frac_chars_top_2grams=0.342857  frac_chars_top_3grams=0.457143  frac_chars_top_4grams=0.542857
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0.140187
    size_file_byte=107  num_lines=5  num_chars_line_max=51  num_chars_line_mean=21.4
    frac_chars_alphabet=0.76087  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0.066038  frac_chars_long_word_length=0  frac_lines_string_concat=0
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=1  frac_lines_func_ratio=0.333333  cate_var_zero=false  frac_lines_pass=0
                frac_lines_import=0.333333  frac_lines_simplefunc=0  score_lines_no_logic=0.666667  frac_lines_print=0
  qsc_code_* (unsuffixed copies):
    num_words=1  num_chars=0  mean_word_length=0  frac_words_unique=null
    frac_chars_top_2grams=1  frac_chars_top_3grams=1  frac_chars_top_4grams=1
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0
    size_file_byte=0  num_lines=1  num_chars_line_max=0  num_chars_line_mean=0
    frac_chars_alphabet=0  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=null
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=0  frac_lines_func_ratio=1  cate_var_zero=0  frac_lines_pass=0
                frac_lines_import=1  frac_lines_simplefunc=0  score_lines_no_logic=0  frac_lines_print=0
  effective: 0    hits: 7
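Row 3 is small enough to check the word-level signals by hand: num_words=17 with frac_words_unique=0.705882 implies 12 distinct words (12/17 ≈ 0.705882). A sketch of those signals (the tokenizer is an assumption; the dataset's splitting rules are not shown in this dump):

    import re

    def word_signals(content: str):
        # Tokenizer is an assumption (word characters only); adjust to taste.
        words = re.findall(r"\w+", content)
        num_words = len(words)
        mean_word_length = sum(map(len, words)) / num_words if num_words else 0.0
        frac_words_unique = len(set(words)) / num_words if num_words else 0.0
        return num_words, mean_word_length, frac_words_unique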
Row 4
  hexsha: fcfc3907d4d25b7690f1ad579b148afd35eb3544
  size: 174    ext: py    lang: Python
  max_stars_repo:  path=aoutil/__init__.py  name=a24ma/aoutil
                   head_hexsha=72d6acb93aea2e9fa36912938c932b025f40e277  licenses=["MIT"]
                   max_stars_count=null  stars_event=null .. null
  max_issues_repo: same path, name, head_hexsha, licenses  max_issues_count=null  issues_event=null .. null
  max_forks_repo:  same path, name, head_hexsha, licenses  max_forks_count=null  forks_event=null .. null
  content:
    from aoutil.now import now, ts
    from aoutil.log import setup_detail_logger, setup_simple_logger, test_log
    from aoutil.log import NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
  avg_line_length: 43.5    max_line_length: 73    alphanum_fraction: 0.821839
  quality signals (*_quality_signal):
    num_words=28  num_chars=174  mean_word_length=4.928571  frac_words_unique=0.607143
    frac_chars_top_2grams=0.217391  frac_chars_top_3grams=0.188406  frac_chars_top_4grams=0.275362
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0.114943
    size_file_byte=174  num_lines=3  num_chars_line_max=74  num_chars_line_mean=58
    frac_chars_alphabet=0.896104  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=0
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=1  frac_lines_func_ratio=0  cate_var_zero=true  frac_lines_pass=0
                frac_lines_import=1  frac_lines_simplefunc=0  score_lines_no_logic=1  frac_lines_print=0
  qsc_code_* (unsuffixed copies):
    num_words=1  num_chars=0  mean_word_length=0  frac_words_unique=null
    frac_chars_top_2grams=1  frac_chars_top_3grams=1  frac_chars_top_4grams=1
    frac_chars_dupe_5grams..10grams=0, 0, 0, 0, 0, 0
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0
    size_file_byte=0  num_lines=1  num_chars_line_max=0  num_chars_line_mean=0
    frac_chars_alphabet=0  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=0  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=null
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=0  frac_lines_func_ratio=0  cate_var_zero=1  frac_lines_pass=0
                frac_lines_import=1  frac_lines_simplefunc=0  score_lines_no_logic=1  frac_lines_print=0
  effective: 0    hits: 8
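Row 4 is three import lines, which lines up with codepython_frac_lines_import = 1 and cate_ast = 1. A hedged sketch of how such Python-specific signals could be computed, reading cate_ast as "parses to a valid AST" (the per-line heuristics are illustrative, not the dataset's exact logic):

    import ast

    def python_signals(content: str):
        try:
            ast.parse(content)
            cate_ast = 1                       # source parses as valid Python
        except SyntaxError:
            cate_ast = 0
        lines = [line.strip() for line in content.splitlines() if line.strip()]
        n = len(lines) or 1
        frac_lines_import = sum(line.startswith(("import ", "from ")) for line in lines) / n
        frac_lines_print = sum(line.startswith("print") for line in lines) / n
        return cate_ast, frac_lines_import, frac_lines_print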
Row 5
  hexsha: 1e22de507ca4eafff100b3273f32d40938b21621
  size: 28100    ext: py    lang: Python
  max_stars_repo:  path=BIGDATA/hadoop.py  name=KranNaut/Dash
                   head_hexsha=dfddf1c6a0e84761df2216a576f46cb1e5138bd8  licenses=["MIT"]
                   max_stars_count=null  stars_event=null .. null
  max_issues_repo: same path, name, head_hexsha, licenses  max_issues_count=null  issues_event=null .. null
  max_forks_repo:  same path, name, head_hexsha, licenses  max_forks_count=null  forks_event=null .. null
  content:
import os import getpass import subprocess ############################################################### def LocalHadoopInstall(): status = 1 print("Please wait") if not subprocess.getstatusoutput("sudo yum install initscripts -y")[0]: if not subprocess.getstatusoutput("pip3 install gdown")[0]: if not os.system( "sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/hadoop-1.2.1-1.x86_64.rpm") and 0 == os.system( "sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/jdk-8u171-linux-x64.rpm"): os.system("sudo rpm -ihv jdk-8u171-linux-x64.rpm") status = os.system("sudo rpm -ihv hadoop-1.2.1-1.x86_64.rpm --force") if status == 0: print("Hadoop successfully installed in your system") else: print("something went wrong please contact support team") ################################################################# def LocalNodeConfigure(current_type): if current_type == "NameNode": os.system("sudo cp BIGDATA/templates/core-site/namenode/core-site.xml /etc/hadoop/core-site.xml") folder = input("Enter Directory: ") subprocess.getstatusoutput("sudo mkdir {}".format(folder)) if not os.system("sudo cp BIGDATA/templates/hdfs-site/namenode/hdfs-site.xml /etc/hadoop/hdfs-site.xml"): return True elif current_type == "DataNode": os.system("sudo cp BIGDATA/templates/core-site/datanode/core-site.xml /etc/hadoop/core-site.xml") folder = input("Enter Directory: ") subprocess.getstatusoutput("sudo mkdir {}".format(folder)) if not os.system("sudo cp BIGDATA/templates/hdfs-site/datanode/hdfs-site.xml /etc/hadoop/hdfs-site.xml"): return True else: os.system("sudo cp BIGDATA/templates/core-site/datanode/core-site.xml /etc/hadoop/core-site.xml") if not os.system("sudo cp BIGDATA/templates/hdfs-site/client/hdfs-site.xml /etc/hadoop/hdfs-site.xml"): return True return False ##################################################################### def LocalCurrentNode(): os.system('tput setaf 4') print(""" Current system is..... 
Enter 1 For NameNode Enter 2 For DataNode Enter 3 For Client Enter 4 to return back """) os.system('tput setaf 7') choice = input("Enter your Choice: ") if choice == "1": if LocalNodeConfigure("NameNode"): print("completed") else: print("Something went Wrong") elif choice == '2': if LocalNodeConfigure("DataNode"): print("completed") else: print("Something Went Wrong") elif choice == '3': if LocalNodeConfigure("Client"): print("Client Started.....") else: print("Something Went Wrong") elif choice == '4': return else: print("Something Went Wrong") ################################################################################### ################################################################################### def RemoteHadoopInstall(username, password, Ip): status = None if 0 == subprocess.getstatusoutput("sudo yum install initscripts -y")[0]: if 0 == os.system("sshpass -p {} ssh {}@{} yum install wget -y".format(password, username, Ip)): if 0 == os.system( "sshpass -p {} ssh {}@{} sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/hadoop-1.2.1-1.x86_64.rpm".format( password, username, Ip)) and 0 == os.system( "sshpass -p {} ssh {}@{} sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/jdk-8u171-linux-x64.rpm".format( password, username, Ip)): if 0 == os.system( "sshpass -p {} ssh {}@{} rpm -ihv jdk-8u171-linux-x64.rpm".format(password, username, Ip)): status = os.system( "sshpass -p {} ssh {}@{} rpm -ihv hadoop-1.2.1-1.x86_64.rpm --force".format(password, username, Ip)) if status == 0: print("Hadoop successfully installed in your system") else: print("something went wrong please contact support team") ############################################################################### def RemoteNodeConfigure(current_type, username, password, Ip): # HDFS-Site file Configure if current_type == "NameNode": os.system( "sshpass -p {} scp BIGDATA/templates/core-site/namenode/core-site.xml {}@{}:/etc/hadoop/core-site.xml".format( password, username, Ip)) folder = input("Enter CurrentNode Directory: ") subprocess.getoutput("sshpass -p {} ssh {}@{} sudo mkdir {}".format(password, username, Ip, folder)) if not os.system( "sshpass -p {} scp BIGDATA/templates/hdfs-site/namenode/hdfs-site.xml {}@{}:/etc/hadoop/hdfs-site.xml".format( password, username, Ip)): return True elif current_type == "DataNode": os.system( "sshpass -p {} scp BIGDATA/templates/core-site/datanode/core-site.xml {}@{}:/etc/hadoop/core-site.xml".format( password, username, Ip)) folder = input("Enter CurrentNode Directory: ") subprocess.getoutput("sshpass -p {} ssh {}@{} sudo mkdir {}".format(password, username, Ip, folder)) if not os.system( "sshpass -p {} scp BIGDATA/templates/hdfs-site/datanode/hdfs-site.xml {}@{}:/etc/hadoop/hdfs-site.xml".format( password, username, Ip)): return True else: os.system( "sshpass -p {} scp BIGDATA/templates/core-site/datanode/core-site.xml {}@{}:/etc/hadoop/core-site.xml".format( password, username, Ip)) if not os.system( "sshpass -p {} scp BIGDATA/templates/hdfs-site/client/hdfs-site.xml {}@{}:/etc/hadoop/hdfs-site.xml".format( password, username, Ip)): return True return False ######################################################################## def RemoteCurrentNode(username, password, Ip): os.system('tput setaf 4') print(""" Current system is..... 
Enter 1 For NameNode Enter 2 For DataNode Enter 3 For Client Enter 4 to return back """) os.system('tput setaf 7') choice = input("Enter your Choice: ") if choice == "1": if RemoteNodeConfigure("NameNode", username, password, Ip): print("completed") else: print("Something went Wrong") elif choice == '2': if RemoteNodeConfigure("DataNode", username, password, Ip): print("completed") else: print("Something went Wrong") elif choice == '3': if RemoteNodeConfigure("Client", username, password, Ip): print("Completed") else: print("Something went Wrong") elif choice == '4': return else: print("wrong choice") ######################################################################## ######################################################################## def CloudHadoopInstall(username, key_path, Ip): status = None if 0 == subprocess.getstatusoutput( "ssh -i {} {}@{} sudo yum install initscripts -y".format(key_path, username, Ip))[0]: if 0 == os.system("ssh -i {} {}@{} sudo yum install wget -y".format(key_path, username, Ip)): if 0 == os.system( "ssh -i {} {}@{} sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/hadoop-1.2.1-1.x86_64.rpm".format( key_path, username, Ip)) and 0 == os.system( "ssh -i {} {}@{} sudo wget https://rohitraut04.s3.ap-south-1.amazonaws.com/jdk-8u171-linux-x64.rpm".format( key_path, username, Ip)): if 0 == os.system( "ssh -i {} {}@{} sudo rpm -ihv jdk-8u171-linux-x64.rpm".format(key_path, username, Ip)): status = os.system( "ssh -i {} {}@{} sudo rpm -ihv hadoop-1.2.1-1.x86_64.rpm --force".format(key_path, username, Ip)) if status == 0: print("Hadoop successfully installed in your system") else: print("something went wrong") ######################################################################## def CloudNodeConfigure(current_type, username, key_path, Ip): # HDFS-Site file Configure if current_type == "NameNode": if not os.system( "scp -i {} BIGDATA/templates/core-site/namenode/core-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp core-site.xml /etc/hadoop/core-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} sudo rm core-site.xml".format(key_path, username, Ip)): pass else: return False folder = input("Enter Directory: ") subprocess.getoutput("ssh -i {} {}@{} sudo mkdir {}".format(key_path, username, Ip, folder)) if not os.system("scp -i {} BIGDATA/templates/hdfs-site/namenode/hdfs-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp hdfs-site.xml /etc/hadoop/hdfs-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} sudo rm hdfs-site.xml".format(key_path, username, Ip)): return True elif current_type == "DataNode": if not os.system( "scp -i {} BIGDATA/templates/core-site/datanode/core-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp core-site.xml /etc/hadoop/core-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} sudo rm core-site.xml".format(key_path, username, Ip)): pass else: return False folder = input("Enter Directory: ") subprocess.getoutput("ssh -i {} {}@{} sudo mkdir {}".format(key_path, username, Ip, folder)) if not os.system("scp -i {} BIGDATA/templates/hdfs-site/datanode/hdfs-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp hdfs-site.xml /etc/hadoop/hdfs-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} 
sudo rm hdfs-site.xml".format(key_path, username, Ip)): return True else: if not os.system( "scp -i {} BIGDATA/templates/core-site/datanode/core-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp core-site.xml /etc/hadoop/core-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} sudo rm core-site.xml".format(key_path, username, Ip)): pass else: return False if not os.system( "scp -i {} BIGDATA/templates/hdfs-site/client/hdfs-site.xml {}@{}:/home/{}/".format(key_path, username, Ip, username)) and not os.system( "ssh -i {} {}@{} sudo cp hdfs-site.xml /etc/hadoop/hdfs-site.xml".format(key_path, username, Ip)) and not os.system( "ssh -i {} {}@{} sudo rm hdfs-site.xml".format(key_path, username, Ip)): return True return False ########################################################################## def CloudCurrentNode(username, key_path, Ip): os.system('tput setaf 4') print(""" Current system is..... Enter 1 For NameNode Enter 2 For DataNode Enter 3 For Client Enter 4 to return back """) os.system('tput setaf 7') choice = input("Enter your Choice: ") if choice == "1": if CloudNodeConfigure("NameNode", username, key_path, Ip): print("completed") else: print("Something went Wrong") elif choice == '2': if CloudNodeConfigure("DataNode", username, key_path, Ip): print("Completed") else: print("Something went Wrong") elif choice == '3': if CloudNodeConfigure("Client", key_path, username, Ip): print("Client Started.....") else: print("Something went Wrong") elif choice == '4': return else: print(" wrong choice ") #################################################################################### #################################################################################### #################################################################################### def HadoopMainMenu(): os.system('tput setaf 3') print("\t\t\t\t=====================================================") print("\t\t\t\t\t\tWelcome to Hadoop menu !!") print("\t\t\t\t=====================================================") os.system('tput setaf 4') ostype = input(""" Enter local to work on local operating system Enter remote to work on remote operating system :""") if ostype == "local": while True: os.system('tput setaf 4') print(""" Enter 1 to install hadoop Enter 2 to configure node Enter 3 to format namenode Enter 4 to start/stop hadoop service Enter 5 to get cluster report Enter 6 to see all files Enter 7 to put/rm/read File Enter 8 to return """) os.system('tput setaf 7') choice = input("Enter your choice: ") if choice == "1": LocalHadoopInstall() elif choice == "2": LocalCurrentNode() elif choice == '3': os.system("hadoop namenode -format") elif choice == "4": s = input("Enter start/stop hadoop service : ") if s == "start": service = input("service NameNode/Datanode : ") if service.lower() == "namenode": os.system("hadoop-daemon.sh start namenode") elif service.lower() == "datanode": os.system("hadoop-daemon.sh start datanode") elif s == "stop": service = input("service NameNode/Datanode : ") if service.lower() == "namenode": os.system("hadoop-daemon.sh stop namenode") elif service.lower() == "datanode": os.system("hadoop-daemon.sh stop datanode") else: print("wrong input ") elif choice == "5": os.system("hadoop dfsadmin -report") elif choice == "6": os.system("hadoop fs -ls /") elif choice == "7": c = input("Enter put/rm/read File") if c.lower() == 'put': file_name = input("Enter file name [PATH/filename] : ") os.system("hadoop fs -put 
{} /".format(file_name)) elif c.lower() == "rm": file_name = input("Enter File name : ") os.system("hadoop fs -rm /{}".format(file_name)) elif c.lower() == "read": file_name = input("Enter file name : ") os.system("hadoop fs -cat /{}".format(file_name)) elif choice == "8": return else: print("not supported") input("Press Enter to continue........") os.system('clear') elif ostype == "remote": username = input("Enter os username : ").strip() ip = input("Enter os ip: ").strip() key_or_password = input("Connect using password/Key : ").strip() if key_or_password.lower() == "password" or key_or_password == "pass": password = getpass.getpass("Enter password: ") os.system("yum install sshpass") while True: os.system('tput setaf 4') print(""" Enter 1 to install hadoop Enter 2 for configure node Enter 3 to format namenode Enter 4 for start/stop hadoop service Enter 5 for get cluster report Enter 6 to see all files in cluster Enter 7 to put/rm/read File Enter 8 to return """) os.system('tput setaf 7') choice = input("Enter you choice : ") if choice == "1": RemoteHadoopInstall(username, password, ip) elif choice == "2": RemoteCurrentNode(username, password, ip) elif choice == "3": os.system( "sshpass -p {} ssh {}@{} hadoop namenode -format".format(password, username, ip)) elif choice == "4": s = input("Enter start/stop hadoop service : ") if s == "start": service = input("service NameNode/Datanode : ") if service.lower() == "namenode": os.system( "sshpass -p {} ssh {}@{} hadoop-daemon.sh start namenode".format(password, username, ip)) os.system('sleep 3') service_state = subprocess.getstatusoutput( "sshpass -p {} ssh {}@{} sudo jps".format(password, username, ip)) if service_state[0] == 0 and 'NameNode' in service_state[1]: print("NameNode Started") else: print("failed to start service") elif service.lower() == "datanode": os.system( "sshpass -p {} ssh {}@{} hadoop-daemon.sh start datanode".format(password, username, ip)) os.system('sleep 3') service_state = subprocess.getstatusoutput( "sshpass -p {} ssh {}@{} sudo jps".format(password, username, ip)) if service_state[0] == 0 and 'DataNode' in service_state[1]: print("DataNode Started") else: print("failed to start service") else: print("Wrong Input") elif s == "stop": service = str(input("service NameNode/Datanode:")) if service.lower() == "namenode": os.system( "sshpass -p {} ssh {}@{} hadoop-daemon.sh stop namenode".format(password, username, ip)) elif service.lower() == "datanode": os.system( "sshpass -p {} ssh {}@{} hadoop-daemon.sh stop datanode".format(password, username, ip)) else: print("Wrong Input") else: print("wrong input ") elif choice == '5': os.system( "sshpass -p {} ssh {}@{} hadoop dfsadmin -report".format(password, username, ip)) elif choice == '6': os.system( "sshpass -p {} ssh {}@{} hadoop fs -ls /".format(password, username, ip)) elif choice == "7": c = input("Enter put/rm/read File") if c.lower() == 'put': file_name = input("Enter file name [PATH/filename] : ") os.system( "sshpass -p {} ssh {}@{} hadoop fs -put {} /".format(password, username, ip, file_name)) elif c.lower() == "rm": file_name = input("Enter File name : ") os.system("sshpass -p {} ssh {}@{} hadoop fs -rm /{}".format(password, username, ip, file_name)) elif c.lower() == "read": file_name = input("Enter file name : ") os.system( "sshpass -p {} ssh {}@{} hadoop fs -cat /{}".format(password, username, ip, file_name)) elif choice == '9': exit() else: print("not supported") input("Press Enter to continue........") os.system('clear') elif key_or_password.lower() == 
"key": key = input("Enter key in this format { PATH/KeyName.pem } : ") while True: os.system('tput setaf 4') print(""" Enter 1 to install hadoop Enter 2 for configure node Enter 3 to format namenode Enter 4 start/stop hadoop service Enter 5 to get cluster report Enter 6 to see all files in cluster Enter 7 to put/read/rm file in cluster Enter 8 to return """) os.system('tput setaf 7') choice = input("Enter your choice: ") if choice == "1": CloudHadoopInstall(username, key, ip) elif choice == "2": CloudCurrentNode(username, key, ip) elif choice == "3": os.system("ssh -i {} {}@{} sudo hadoop namenode -format".format(key, username, ip)) elif choice == "4": s = input("Enter start/stop hadoop service : ") if s == "start": service = input("service NameNode/Datanode : ") if service.lower() == "namenode": os.system( "ssh -i {} {}@{} sudo hadoop-daemon.sh start namenode".format(key, username, ip)) os.system('sleep 3') service_state = subprocess.getstatusoutput( "ssh -i {} {}@{} sudo jps".format(key, username, ip)) if service_state[0] == 0 and 'NameNode' in service_state[1]: print("NameNode Started") else: print("failed to start service") elif service.lower() == "datanode": os.system( "ssh -i {} {}@{} sudo hadoop-daemon.sh start datanode".format(key, username, ip)) os.system('sleep 3') service_state = subprocess.getstatusoutput( "ssh -i {} {}@{} sudo jps".format(key, username, ip)) if service_state[0] == 0 and 'DataNode' in service_state[1]: print("DataNode Started") else: print("failed to start service") else: print("Wrong Input") elif s == "stop": service = input("service NameNode/Datanode : ") if service.lower() == "namenode": os.system( "ssh -i {} {}@{} sudo hadoop-daemon.sh stop namenode".format(key, username, ip)) elif service.lower() == "datanode": os.system( "ssh -i {} {}@{} sudo hadoop-daemon.sh stop datanode".format(key, username, ip)) else: print("Wrong Input") else: print("wrong input ") elif choice == '5': os.system( "ssh -i {} {}@{} sudo hadoop dfsadmin -report".format(key, username, ip)) elif choice == '6': os.system( "ssh -i {} {}@{} sudo hadoop fs -ls /".format(key, username, ip)) elif choice == "7": c = input("Enter put/rm/read File") if c.lower() == 'put': file_name = input("Enter file name [PATH/filename] : ") os.system("ssh -i {} {}@{} hadoop fs -put {} /".format(key, username, ip, file_name)) elif c.lower() == "rm": file_name = input("Enter File name : ") os.system("ssh -i {} {}@{} hadoop fs -rm /{}".format(key, username, ip, file_name)) elif c.lower() == "read": file_name = input("Enter file name : ") os.system("ssh -i {} {}@{} hadoop fs -cat /{}".format(key, username, ip, file_name)) elif choice == '8': return else: print("not supported") input("Press Enter to continue........") os.system('clear')
  avg_line_length: 49.646643    max_line_length: 140    alphanum_fraction: 0.434128
  quality signals (*_quality_signal):
    num_words=2647  num_chars=28100  mean_word_length=4.576502  frac_words_unique=0.064601
    frac_chars_top_2grams=0.063398  frac_chars_top_3grams=0.019151  frac_chars_top_4grams=0.03789
    frac_chars_dupe_5grams..10grams=0.883276, 0.855787, 0.817732, 0.792637, 0.778273, 0.743685
    frac_chars_replacement_symbols=0  frac_chars_digital=0.013866  frac_chars_whitespace=0.409715
    size_file_byte=28100  num_lines=566  num_chars_line_max=141  num_chars_line_mean=49.646643
    frac_chars_alphabet=0.716465  frac_chars_comments=0.001744  cate_xml_start=0
    frac_lines_dupe_lines=0.747495  cate_autogen=0  frac_lines_long_string=0.064128
    frac_chars_string_length=0.335161  frac_chars_long_word_length=0.060979  frac_lines_string_concat=0
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=1  frac_lines_func_ratio=0.02004  cate_var_zero=false  frac_lines_pass=0.1002
                frac_lines_import=0.006012  frac_lines_simplefunc=0  score_lines_no_logic=0.066132  frac_lines_print=0.11022
  qsc_code_* (unsuffixed copies):
    num_words=0  num_chars=0  mean_word_length=0  frac_words_unique=null
    frac_chars_top_2grams=0  frac_chars_top_3grams=0  frac_chars_top_4grams=0
    frac_chars_dupe_5grams..10grams=1, 1, 1, 1, 1, 1
    frac_chars_replacement_symbols=0  frac_chars_digital=0  frac_chars_whitespace=0
    size_file_byte=0  num_lines=0  num_chars_line_max=0  num_chars_line_mean=0
    frac_chars_alphabet=0  frac_chars_comments=0  cate_xml_start=0
    frac_lines_dupe_lines=1  cate_autogen=0  frac_lines_long_string=0
    frac_chars_string_length=0  frac_chars_long_word_length=0  frac_lines_string_concat=null
    cate_encoded_data=0  frac_chars_hex_words=0  frac_lines_prompt_comments=0  frac_lines_assert=0
    codepython: cate_ast=0  frac_lines_func_ratio=0  cate_var_zero=0  frac_lines_pass=1
                frac_lines_import=0  frac_lines_simplefunc=0  score_lines_no_logic=0  frac_lines_print=0
  effective: 0    hits: 8
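Rows like 5, with frac_chars_dupe_10grams near 0.74 and frac_lines_dupe_lines near 0.75, are exactly what these columns exist to flag. A usage sketch for filtering on the signals (thresholds are illustrative, not the dataset's official rules; "shard.parquet" is the same hypothetical path as above):

    import pandas as pd

    df = pd.read_parquet("shard.parquet")
    keep = (
        (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
        & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.3)
        & (df["alphanum_fraction"] > 0.25)
    )
    print(f"kept {keep.sum()} of {len(df)} rows")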
Row 6
  hexsha: 1e23a75b66b66aaca69fc27019ff9c61bf0758f0
  size: 63730    ext: py    lang: Python
  max_stars_repo:  path=src/amuse/rfi/tools/create_fortran.py  name=aatrani/amuse
                   head_hexsha=fd1abcfb1b118a9ab13031912abf6e65e9c60dde  licenses=["Apache-2.0"]
                   max_stars_count=1  stars_event=2019-12-28T22:47:51.000Z .. 2019-12-28T22:47:51.000Z
  max_issues_repo: same path, name, head_hexsha, licenses
                   max_issues_count=1  issues_event=2020-01-27T17:01:49.000Z .. 2020-01-28T02:09:55.000Z
  max_forks_repo:  same path, name, head_hexsha, licenses  max_forks_count=null  forks_event=null .. null
  content:
from amuse.support.core import late from amuse.support import exceptions from amuse import config from amuse.rfi.tools.create_code import GenerateASourcecodeString from amuse.rfi.tools.create_code import GenerateASourcecodeStringFromASpecificationClass from amuse.rfi.tools.create_code import DTypeSpec from amuse.rfi.tools.create_code import dtypes from amuse.rfi.tools.create_code import DTypeToSpecDictionary from amuse.rfi.tools import create_definition from amuse.rfi.core import LegacyFunctionSpecification dtype_to_spec = DTypeToSpecDictionary({ 'int32' : DTypeSpec('integers_in','integers_out','HEADER_INTEGER_COUNT', 'integer', 'integer'), 'int64' : DTypeSpec('longs_in', 'longs_out', 'HEADER_LONG_COUNT', 'integer*8', 'long'), 'float32' : DTypeSpec('floats_in', 'floats_out', 'HEADER_FLOAT_COUNT', 'real*4', 'float'), 'float64' : DTypeSpec('doubles_in', 'doubles_out', 'HEADER_DOUBLE_COUNT', 'real*8', 'double'), 'bool' : DTypeSpec('booleans_in', 'booleans_out', 'HEADER_BOOLEAN_COUNT', 'logical', 'boolean'), 'string' : DTypeSpec('strings_in', 'strings_out', 'HEADER_STRING_COUNT', 'integer*4', 'integer'), }) CONSTANTS_STRING = """ integer HEADER_FLAGS, HEADER_CALL_ID, HEADER_FUNCTION_ID, HEADER_CALL_COUNT, & HEADER_INTEGER_COUNT, HEADER_LONG_COUNT, HEADER_FLOAT_COUNT, & HEADER_DOUBLE_COUNT, HEADER_BOOLEAN_COUNT, HEADER_STRING_COUNT, & HEADER_SIZE, MAX_COMMUNICATORS parameter (HEADER_FLAGS=1, HEADER_CALL_ID=2, HEADER_FUNCTION_ID=3, & HEADER_CALL_COUNT=4, HEADER_INTEGER_COUNT=5, HEADER_LONG_COUNT=6, & HEADER_FLOAT_COUNT=7, HEADER_DOUBLE_COUNT=8, & HEADER_BOOLEAN_COUNT=9, HEADER_STRING_COUNT=10, & HEADER_SIZE=11, MAX_COMMUNICATORS = 2048) """ ARRAY_DEFINES_STRING = """ integer*4, target :: header_in(HEADER_SIZE) integer*4, target :: header_out(HEADER_SIZE) integer*4, allocatable, target :: integers_in(:) integer*4, allocatable, target :: integers_out(:) integer*8, allocatable, target :: longs_in(:) integer*8, allocatable, target :: longs_out(:) real*4, allocatable, target :: floats_in(:) real*4, allocatable, target :: floats_out(:) real*8, allocatable, target :: doubles_in(:) real*8, allocatable, target :: doubles_out(:) logical*1, allocatable, target :: c_booleans_in(:) logical*1, allocatable, target :: c_booleans_out(:) logical, allocatable, target :: booleans_in(:) logical, allocatable, target :: booleans_out(:) integer*4, allocatable, target :: string_sizes_in(:) integer*4, allocatable, target :: string_sizes_out(:) character (len=256), allocatable, target :: strings_in(:) character (len=256), allocatable, target :: strings_out(:) character (len=100000) :: characters_in character (len=100000) :: characters_out """ ISO_ARRAY_DEFINES_STRING = """ integer (c_int32_t), target :: header_in(HEADER_SIZE) integer (c_int32_t), target :: header_out(HEADER_SIZE) integer (c_int32_t), allocatable, target :: integers_in(:) integer (c_int32_t), allocatable, target :: integers_out(:) integer (c_int64_t), allocatable, target :: longs_in(:) integer (c_int64_t), allocatable, target :: longs_out(:) real (c_float), allocatable, target :: floats_in(:) real (c_float), allocatable, target :: floats_out(:) real (c_double), allocatable, target :: doubles_in(:) real (c_double), allocatable, target :: doubles_out(:) logical (c_bool), allocatable, target :: c_booleans_in(:) logical (c_bool), allocatable, target :: c_booleans_out(:) logical, allocatable, target :: booleans_in(:) logical, allocatable, target :: booleans_out(:) integer (c_int32_t), allocatable, target :: string_sizes_in(:) integer (c_int32_t), allocatable, 
target :: string_sizes_out(:) character (c_char), allocatable, target :: strings_in(:) * 256 character (c_char), allocatable, target :: strings_out(:) * 256 character (len=1000000) :: characters_in character (len=1000000) :: characters_out character (kind=c_char), target :: c_characters_in(1000000) character (kind=c_char), target :: c_characters_out(1000000) """ MODULE_GLOBALS_STRING = """ integer, save :: polling_interval = 0 integer, save :: last_communicator_id = 0 integer, save :: communicators(MAX_COMMUNICATORS) integer, save :: id_to_activate = -1 integer, save :: active_communicator_id = -1 """ NOMPI_MODULE_GLOBALS_STRING = """ integer, save :: polling_interval = 0 """ MPI_INTERNAL_FUNCTIONS_STRING = """ FUNCTION internal__open_port(outval) IMPLICIT NONE INCLUDE 'mpif.h' character(len=MPI_MAX_PORT_NAME+1), intent(out) :: outval INTEGER :: internal__open_port INTEGER :: ierror call MPI_Open_port(MPI_INFO_NULL, outval, ierror); internal__open_port = 0 END FUNCTION FUNCTION internal__accept_on_port(port_identifier, comm_identifier) IMPLICIT NONE INCLUDE 'mpif.h' character(len=*), intent(in) :: port_identifier INTEGER, intent(out) :: comm_identifier INTEGER :: internal__accept_on_port INTEGER :: ierror, rank INTEGER :: mcommunicator, communicator last_communicator_id = last_communicator_id + 1 IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN last_communicator_id = last_communicator_id - 1 comm_identifier = -1 internal__accept_on_port = -1 return; END IF call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror); IF (rank .EQ. 0) THEN call MPI_Comm_accept(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror) call MPI_Intercomm_merge(communicator, 0, mcommunicator, ierror) call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 1, 65, communicators(last_communicator_id), ierror) call MPI_Comm_free(mcommunicator, ierror) call MPI_Comm_free(communicator, ierror) ELSE call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror) END IF comm_identifier = last_communicator_id; internal__accept_on_port = 0 END FUNCTION FUNCTION internal__connect_to_port(port_identifier, comm_identifier) IMPLICIT NONE INCLUDE 'mpif.h' character(len=*), intent(in) :: port_identifier INTEGER, intent(out) :: comm_identifier INTEGER :: internal__connect_to_port INTEGER :: ierror, rank INTEGER :: mcommunicator, communicator last_communicator_id = last_communicator_id + 1 IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN last_communicator_id = last_communicator_id - 1 comm_identifier = -1 internal__connect_to_port = -1 return; END IF call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror); IF (rank .EQ. 0) THEN call MPI_Comm_connect(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror) call MPI_Intercomm_merge(communicator, 1, mcommunicator, ierror) call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 0, 65, communicators(last_communicator_id), ierror) call MPI_Comm_free(mcommunicator, ierror) call MPI_Comm_free(communicator, ierror) ELSE call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror) END IF comm_identifier = last_communicator_id; internal__connect_to_port = 0 END FUNCTION FUNCTION internal__activate_communicator(comm_identifier) IMPLICIT NONE INCLUDE 'mpif.h' INTEGER, intent(in) :: comm_identifier INTEGER :: internal__activate_communicator if ((comm_identifier .LT. 0) .OR. (comm_identifier .GT. 
last_communicator_id)) then internal__activate_communicator = -1 return end if internal__activate_communicator = 0 id_to_activate = comm_identifier END FUNCTION FUNCTION internal__become_code(number_of_workers, modulename, classname) IMPLICIT NONE character(len=*), intent(in) :: modulename, classname integer, intent(in) :: number_of_workers INTEGER :: internal__become_code internal__become_code = 0 END FUNCTION """ NOMPI_INTERNAL_FUNCTIONS_STRING = """ FUNCTION internal__open_port(outval) IMPLICIT NONE character(len=*), intent(out) :: outval INTEGER :: internal__open_port outval = "" internal__open_port = 0 END FUNCTION FUNCTION internal__accept_on_port(port_identifier, comm_identifier) IMPLICIT NONE character(len=*), intent(in) :: port_identifier INTEGER, intent(out) :: comm_identifier INTEGER :: internal__accept_on_port comm_identifier = -1; internal__accept_on_port = 0 END FUNCTION FUNCTION internal__connect_to_port(port_identifier, comm_identifier) IMPLICIT NONE character(len=*), intent(in) :: port_identifier INTEGER, intent(out) :: comm_identifier INTEGER :: internal__connect_to_port comm_identifier = -1 internal__connect_to_port = 0 END FUNCTION FUNCTION internal__activate_communicator(comm_identifier) IMPLICIT NONE INTEGER, intent(in) :: comm_identifier INTEGER :: internal__activate_communicator internal__activate_communicator = 0 END FUNCTION FUNCTION internal__become_code(number_of_workers, modulename, classname) IMPLICIT NONE character(len=*), intent(in) :: modulename, classname integer, intent(in) :: number_of_workers INTEGER :: internal__become_code internal__become_code = 0 END FUNCTION """ INTERNAL_FUNCTIONS_STRING = MPI_INTERNAL_FUNCTIONS_STRING POLLING_FUNCTIONS_STRING = """ FUNCTION internal__get_message_polling_interval(outval) INTEGER,intent(out) :: outval INTEGER :: internal__get_message_polling_interval outval = polling_interval internal__get_message_polling_interval = 0 END FUNCTION FUNCTION internal__set_message_polling_interval(inval) INTEGER,intent(in) :: inval INTEGER :: internal__set_message_polling_interval polling_interval = inval internal__set_message_polling_interval = 0 END FUNCTION """ RECV_HEADER_SLEEP_STRING = """ SUBROUTINE mpi_recv_header(parent, ioerror) use iso_c_binding implicit none INCLUDE 'mpif.h' integer,intent(in) :: parent integer,intent(inout) :: ioerror integer :: request_status(MPI_STATUS_SIZE),header_request logical is_finished INTERFACE INTEGER (C_INT) FUNCTION usleep(useconds) bind(C) !SUBROUTINE usleep(useconds) bind(C) use iso_c_binding implicit none INTEGER(c_int32_t), value :: useconds END END INTERFACE call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror) if(polling_interval.GT.0) then is_finished = .false. call MPI_Test(header_request, is_finished, request_status, ioerror) DO WHILE(.NOT. 
is_finished) ioerror = usleep(int(polling_interval, c_int32_t)) call MPI_Test(header_request, is_finished, request_status, ioerror) END DO call MPI_Wait(header_request, request_status, ioerror) else call MPI_Wait(header_request, request_status, ioerror) endif END SUBROUTINE """ RECV_HEADER_WAIT_STRING = """ SUBROUTINE mpi_recv_header(parent, ioerror) implicit none INCLUDE 'mpif.h' integer,intent(in) :: parent integer,intent(inout) :: ioerror integer :: request_status(MPI_STATUS_SIZE),header_request call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror) call MPI_Wait(header_request, request_status, ioerror) END SUBROUTINE """ EMPTY_RUN_LOOP_MPI_STRING = """ SUBROUTINE run_loop_mpi implicit none END SUBROUTINE """ RUN_LOOP_MPI_STRING = """ SUBROUTINE run_loop_mpi implicit none INCLUDE 'mpif.h' integer :: provided integer :: rank, parent, ioerror, max_call_count = 255 integer :: must_run_loop, maximum_size, total_string_length integer i, offset, call_count call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, ioerror) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) !ensure there is at least one string to return an error code in ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) call MPI_COMM_GET_PARENT(parent, ioerror) call MPI_COMM_RANK(parent, rank, ioerror) last_communicator_id = last_communicator_id + 1 communicators(1) = parent active_communicator_id = 1 must_run_loop = 1 do while (must_run_loop .eq. 1) if ((id_to_activate .GE. 0) .AND. (id_to_activate .NE. active_communicator_id)) then active_communicator_id = id_to_activate id_to_activate = -1 parent = communicators(active_communicator_id) call MPI_COMM_RANK(parent, rank, ioerror) end if call mpi_recv_header(parent, ioerror) !print*, 'fortran: got header ', header_in call_count = header_in(HEADER_CALL_COUNT) IF (call_count .gt. 
max_call_count) THEN max_call_count = call_count + 255; DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(c_booleans_in) DEALLOCATE(c_booleans_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) END IF if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, parent, ioError); end if if (header_in(HEADER_LONG_COUNT) .gt. 0) then call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, parent, ioError); end if if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, parent, ioError); end if if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, parent, ioError); end if if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then ! some older MPI do not define MPI_C_BOOL; this seems to work ok ! maybe booleans_in in this call should be replaced by char (more portable) or logical*1 call MPI_BCast(c_booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, parent, ioError); do i=1,header_in(HEADER_BOOLEAN_COUNT) booleans_in(i)=logical(c_booleans_in(i)) enddo end if if (header_in(HEADER_STRING_COUNT) .gt. 0) then strings_in = ' ' call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, parent, ioError); maximum_size = 0 total_string_length = 0 do i = 1, header_in(HEADER_STRING_COUNT), 1 total_string_length = total_string_length + string_sizes_in(i) + 1 if (string_sizes_in(i) .gt. maximum_size) then maximum_size = string_sizes_in(i) end if end do if(maximum_size.GT.256) then print*, "fortran_worker reports too large string" stop endif if(total_string_length.GT.1000000) then print*, "fortran_worker reports too large string message" stop endif call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, parent, ioError); offset = 1 do i = 1, header_in(HEADER_STRING_COUNT), 1 strings_in(i) = ' ' strings_in(i) = characters_in(offset : (offset + string_sizes_in(i))) strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' ' offset = offset + string_sizes_in(i) + 1 !print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), & !' 
actually of size ', len_trim(strings_in(i)) end do end if header_out = 0 header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID) header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID) header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT) strings_out = ' ' must_run_loop = handle_call() !print*, 'fortran: sending header ', header_out if (rank .eq. 0 ) then call MPI_SEND(header_out, HEADER_SIZE, MPI_INTEGER, 0, 999, parent, ioerror); if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then call MPI_SEND(integers_out, header_out(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, 999, parent, ioerror) end if if (header_out(HEADER_LONG_COUNT) .gt. 0) then call MPI_SEND(longs_out, header_out(HEADER_LONG_COUNT), MPI_INTEGER8, 0, 999, parent, ioerror) end if if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then call MPI_SEND(floats_out, header_out(HEADER_FLOAT_COUNT), MPI_REAL, 0, 999, parent, ioerror) end if if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then call MPI_SEND(doubles_out, header_out(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, 999, parent, ioerror) end if if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then do i=1,header_out(HEADER_BOOLEAN_COUNT) c_booleans_out(i)=booleans_out(i) enddo call MPI_SEND(c_booleans_out, header_out(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, 999, parent, ioerror) end if if (header_out(HEADER_STRING_COUNT) .gt. 0) then offset = 1 do i = 1, header_out(HEADER_STRING_COUNT),1 string_sizes_out(i) = len_trim(strings_out(i)) !print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), & !' actually of size ', len_trim(strings_out(i)) characters_out(offset:offset+string_sizes_out(i)) = strings_out(i) offset = offset + string_sizes_out(i) + 1 characters_out(offset-1:offset-1) = char(0) end do total_string_length=offset-1 if(total_string_length.GT.1000000) then print*, "fortran_worker reports too large string message" stop endif call MPI_SEND(string_sizes_out, header_out(HEADER_STRING_COUNT), MPI_INTEGER, 0, 999, parent, ioerror) call MPI_SEND(characters_out, offset -1, MPI_CHARACTER, 0, 999, parent, ioerror) end if end if end do DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) do i = 1, last_communicator_id, 1 call MPI_COMM_DISCONNECT(communicators(i), ioerror); end do call MPI_FINALIZE(ioerror) return end subroutine """ RUN_LOOP_SOCKETS_STRING = """ SUBROUTINE run_loop_sockets use iso_c_binding use FortranSocketsInterface implicit none integer :: max_call_count = 255 integer :: must_run_loop, maximum_size, total_string_length integer :: i, offset, call_count, port character(len=32) :: port_string character(kind=c_char, len=64) :: host logical (c_bool), allocatable, target :: c_booleans_in(:) logical (c_bool), allocatable, target :: c_booleans_out(:) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * 
MAX_BOOLEANS_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) !ensure there is at least one string to return an error code in ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT))) call get_command_argument(1, port_string) call get_command_argument(2, host) read (port_string,*) port !add a null character to the end of the string so c knows when the string ends host = trim(host) // c_null_char call forsockets_init(host, port) must_run_loop = 1 do while (must_run_loop .eq. 1) call receive_integers(c_loc(header_in), HEADER_SIZE) !print*, 'fortran sockets: got header ', header_in call_count = header_in(HEADER_CALL_COUNT) IF (call_count .gt. max_call_count) THEN max_call_count = call_count + 255; DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(c_booleans_in) DEALLOCATE(c_booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) END IF if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT)) end if if (header_in(HEADER_LONG_COUNT) .gt. 0) then call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT)) end if if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT)) end if if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT)) end if if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT)) do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1 booleans_in(i) = logical(c_booleans_in(i)) end do end if if (header_in(HEADER_STRING_COUNT) .gt. 0) then strings_in = ' ' call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT)) maximum_size = 0 total_string_length = 0 do i = 1, header_in(HEADER_STRING_COUNT), 1 total_string_length = total_string_length + string_sizes_in(i) + 1 if (string_sizes_in(i) .gt. 
maximum_size) then maximum_size = string_sizes_in(i) end if end do if(maximum_size.GT.256) then print*, "fortran_worker reports too large string" stop endif if(total_string_length.GT.1000000) then print*, "fortran_worker reports too large string message" stop endif call receive_string(c_loc(c_characters_in), total_string_length) ! this trick is necessary on older gfortran compilers (~<4.9) ! as c_loc needs character(len=1) do i=1, total_string_length characters_in(i:i)=c_characters_in(i) enddo offset = 1 do i = 1, header_in(HEADER_STRING_COUNT), 1 strings_in(i) = ' ' strings_in(i) = characters_in(offset : (offset + string_sizes_in(i))) strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' ' offset = offset + string_sizes_in(i) + 1 !print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), & !' actually of size ', len_trim(strings_in(i)) end do end if header_out = 0 header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID) header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID) header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT) strings_out = ' ' must_run_loop = handle_call() !print*, 'fortran: sending header ', header_out call send_integers(c_loc(header_out), HEADER_SIZE) if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT)) end if if (header_out(HEADER_LONG_COUNT) .gt. 0) then call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT)) end if if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT)) end if if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT)) end if if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1 c_booleans_out(i) = logical(booleans_out(i), c_bool) end do call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT)) end if if (header_out(HEADER_STRING_COUNT) .gt. 0) then offset = 1 do i = 1, header_out(HEADER_STRING_COUNT),1 string_sizes_out(i) = len_trim(strings_out(i)) !print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), & !' 
actually of size ', len_trim(strings_out(i)) characters_out(offset:offset+string_sizes_out(i)) = strings_out(i) offset = offset + string_sizes_out(i) + 1 characters_out(offset-1:offset-1) = char(0) end do total_string_length=offset-1 if(total_string_length.GT.1000000) then print*, "fortran_worker reports too large string message" stop endif do i=1, total_string_length c_characters_out(i)=characters_out(i:i) enddo call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT)) call send_string(c_loc(c_characters_out), offset-1 ) end if end do DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(c_booleans_in) DEALLOCATE(c_booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) call forsockets_close() return end subroutine """ EMPTY_RUN_LOOP_SOCKETS_STRING = """ subroutine run_loop_sockets print*, 'fortran: sockets channel not supported in this worker' return end subroutine """ RUN_LOOP_SOCKETS_MPI_STRING = """ SUBROUTINE run_loop_sockets_mpi use iso_c_binding use FortranSocketsInterface implicit none include 'mpif.h' integer :: provided integer :: max_call_count = 255 integer :: must_run_loop, maximum_size, total_string_length integer :: i, offset, call_count, port, rank, ioerror character(len=32) :: port_string character(kind=c_char, len=64) :: host logical (c_bool), allocatable, target :: c_booleans_in(:) logical (c_bool), allocatable, target :: c_booleans_out(:) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) !ensure there is at least one string to return an error code in ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT))) call mpi_init_thread(mpi_thread_multiple, provided, ioerror) call mpi_comm_rank(MPI_COMM_WORLD, rank, ioerror) if (rank .eq. 0) then call get_command_argument(1, port_string) call get_command_argument(2, host) read (port_string,*) port !add a null character to the end of the string so c knows when the string ends host = trim(host) // c_null_char call forsockets_init(host, port) end if must_run_loop = 1 do while (must_run_loop .eq. 1) if (rank .eq. 0) then call receive_integers(c_loc(header_in), HEADER_SIZE) end if call MPI_BCast(header_in, HEADER_SIZE , MPI_INTEGER, 0, MPI_COMM_WORLD, ioerror) !print*, 'fortran sockets mpi: got header ', header_in call_count = header_in(HEADER_CALL_COUNT) IF (call_count .gt. 
max_call_count) THEN max_call_count = call_count + 255; DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(c_booleans_in) DEALLOCATE(c_booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN)) ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT)) ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN)) ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT)) ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN)) ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT)) ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN)) ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT)) ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN)) ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT)) ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT)) ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN)) ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT))) END IF if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then if (rank .eq. 0) then call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT)) end if call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError); end if if (header_in(HEADER_LONG_COUNT) .gt. 0) then if (rank .eq. 0) then call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT)) end if call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, MPI_COMM_WORLD, ioError); end if if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then if (rank .eq. 0) then call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT)) end if call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, MPI_COMM_WORLD, ioerror) end if if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then if (rank .eq. 0) then call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT)) end if call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, MPI_COMM_WORLD, ioerror) end if if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then if (rank .eq. 0) then call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT)) do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1 booleans_in(i) = logical(c_booleans_in(i)) end do end if call MPI_BCast(booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_LOGICAL, 0, MPI_COMM_WORLD, ioerror) end if if (header_in(HEADER_STRING_COUNT) .gt. 0) then strings_in = ' ' if (rank .eq. 0) then call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT)) end if call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError); maximum_size = 0 total_string_length = 0 do i = 1, header_in(HEADER_STRING_COUNT), 1 total_string_length = total_string_length + string_sizes_in(i) + 1 if (string_sizes_in(i) .gt. maximum_size) then maximum_size = string_sizes_in(i) end if end do if(maximum_size.GT.256) then print*, "fortran_worker reports too large string" stop endif if(total_string_length.GT.1000000) then print*, "fortran_worker reports too large string message" stop endif if (rank .eq. 
0) then call receive_string(c_loc(c_characters_in), total_string_length) endif do i=1, total_string_length characters_in(i:i)=c_characters_in(i) enddo call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, MPI_COMM_WORLD, ioError); offset = 1 do i = 1, header_in(HEADER_STRING_COUNT), 1 strings_in(i) = ' ' strings_in(i) = characters_in(offset : (offset + string_sizes_in(i))) strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' ' offset = offset + string_sizes_in(i) + 1 !print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), & !' actually of size ', len_trim(strings_in(i)) end do end if header_out = 0 header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID) header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID) header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT) strings_out = ' ' must_run_loop = handle_call() call MPI_Barrier(MPI_COMM_WORLD, ioerror) if (rank .eq. 0) then !print*, 'fortran: sending header ', header_out call send_integers(c_loc(header_out), HEADER_SIZE) if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT)) end if if (header_out(HEADER_LONG_COUNT) .gt. 0) then call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT)) end if if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT)) end if if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT)) end if if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1 c_booleans_out(i) = logical(booleans_out(i), c_bool) !print*, 'fortran sockets mpi: sending boolean', booleans_out(i) , i, ' send as ', c_booleans_out(i) end do call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT)) end if if (header_out(HEADER_STRING_COUNT) .gt. 0) then offset = 1 do i = 1, header_out(HEADER_STRING_COUNT),1 string_sizes_out(i) = len_trim(strings_out(i)) !print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), & !' actually of size ', len_trim(strings_out(i)) characters_out(offset:offset+string_sizes_out(i)) = strings_out(i) offset = offset + string_sizes_out(i) + 1 characters_out(offset-1:offset-1) = char(0) end do total_string_length=offset-1 if(total_string_length.GT.1000000) then print*, "fortran_Worker reports too large string message" stop endif do i=1, total_string_length c_characters_out(i)=characters_out(i:i) enddo call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT)) call send_string(c_loc(c_characters_out), offset-1 ) end if end if end do DEALLOCATE(integers_in) DEALLOCATE(integers_out) DEALLOCATE(longs_in) DEALLOCATE(longs_out) DEALLOCATE(floats_in) DEALLOCATE(floats_out) DEALLOCATE(doubles_in) DEALLOCATE(doubles_out) DEALLOCATE(booleans_in) DEALLOCATE(booleans_out) DEALLOCATE(string_sizes_in) DEALLOCATE(string_sizes_out) DEALLOCATE(strings_in) DEALLOCATE(strings_out) if (rank .eq. 0) then call forsockets_close() end if call MPI_FINALIZE(ioerror) return end subroutine """ EMPTY_RUN_LOOP_SOCKETS_MPI_STRING = """ subroutine run_loop_sockets_mpi print*, 'fortran: sockets channel not supported in this worker' return end subroutine """ MAIN_STRING = """ integer :: count logical :: use_mpi character(len=32) :: use_mpi_string count = command_argument_count() use_mpi = NEEDS_MPI if (count .eq. 0) then call run_loop_mpi() else if (count .eq. 
3) then call get_command_argument(3, use_mpi_string) if (use_mpi_string .eq. 'true') then use_mpi = .true. else if (use_mpi_string .eq. 'false') then use_mpi = .false. else print*, 'fortran worker: need either true or false as mpi enable arguments, not', use_mpi_string stop end if if (use_mpi) then call run_loop_sockets_mpi() else call run_loop_sockets() end if else print*, 'fortran worker: need either 0 or 3 arguments, not', count stop end if """ class GenerateAFortranStringOfAFunctionSpecification(GenerateASourcecodeString): MAX_STRING_LEN = 256 @late def specification(self): raise exceptions.AmuseException("No specification set, please set the specification first") @late def underscore_functions_from_specification_classes(self): return [] @late def dtype_to_spec(self): return dtype_to_spec def index_string(self, index, must_copy_in_to_out = False): if self.specification.must_handle_array and not must_copy_in_to_out: if index == 0: return '1' else: return '( %d * call_count) + 1' % (index ) elif self.specification.can_handle_array or (self.specification.must_handle_array and must_copy_in_to_out): if index == 0: return 'i' else: if index == -1: return "i - 1" else: return '( %d * call_count) + i' % index else: return index + 1 def start(self): self.specification.prepare_output_parameters() self.output_casestmt_start() self.out.indent() #self.output_lines_before_with_clear_out_variables() #self.output_lines_before_with_clear_input_variables() if self.specification.must_handle_array: pass elif self.specification.can_handle_array: self.out.lf() + 'do i = 1, call_count, 1' self.out.indent() #self.output_lines_before_with_inout_variables() self.output_function_start() self.output_function_parameters() self.output_function_end() self.output_lines_with_inout_variables() if self.specification.must_handle_array: if not self.specification.result_type is None: spec = self.dtype_to_spec[self.specification.result_type] self.out.lf() + 'DO i = 2, call_count' self.out.indent() self.out.lf() + spec.output_var_name + '(i)' + ' = ' + spec.output_var_name + '(1)' self.out.dedent() self.out.lf() + 'END DO' elif self.specification.can_handle_array: self.out.dedent() self.out.lf() + 'end do' self.output_lines_with_number_of_outputs() self.output_casestmt_end() self.out.dedent() self._result = self.out.string def output_function_parameters(self): self.out.indent() first = True for parameter in self.specification.parameters: spec = self.dtype_to_spec[parameter.datatype] if first: first = False self.out + ' &' else: self.out + ' ,&' if parameter.direction == LegacyFunctionSpecification.IN: # if parameter.datatype == 'string': # self.out.n() + 'input_characters(' # self.out + '( (' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ')' # self.out + ':' + '(((' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ') +' # self.out + '(' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')' + '-' # self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , '+spec.input_var_name +') ))' # self.out + ')' # else: if parameter.datatype == 'string': self.out.n() + 'strings_in(' + self.index_string(parameter.input_index) + ')' else: self.out.n() + spec.input_var_name self.out + '(' + self.index_string(parameter.input_index) + ')' if parameter.direction == LegacyFunctionSpecification.INOUT: # if parameter.datatype == 'string': # self.out.n() + 'output_characters(' # self.out + '((' + self.index_string(parameter.output_index) + ')* ' + 
self.MAX_STRING_LEN + ')' # self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)' # self.out + ')' # else: # if parameter.datatype == 'string': # self.out.n() + spec.input_var_name # self.out + '(' + self.index_string(parameter.input_index) + ', :)' # else: self.out.n() + spec.input_var_name self.out + '(' + self.index_string(parameter.input_index) + ')' elif parameter.direction == LegacyFunctionSpecification.OUT: # if parameter.datatype == 'string': # self.out.n() + 'output_characters(' # self.out + '((' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')' # self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)' # self.out + ')' # else: # if parameter.datatype == 'string': # self.out.n() + spec.output_var_name # self.out + '(' + self.index_string(parameter.output_index) + ')(1:50)' # else: self.out.n() + spec.output_var_name self.out + '(' + self.index_string(parameter.output_index) + ')' elif parameter.direction == LegacyFunctionSpecification.LENGTH: self.out.n() + 'call_count' self.out.dedent() def output_lines_with_inout_variables(self): for parameter in self.specification.parameters: spec = self.dtype_to_spec[parameter.datatype] if parameter.direction == LegacyFunctionSpecification.INOUT: if self.specification.must_handle_array: self.out.lf() + 'DO i = 1, call_count' self.out.indent() self.out.n() + spec.output_var_name self.out + '(' + self.index_string(parameter.output_index, must_copy_in_to_out = True) + ')' self.out + ' = ' self.out + spec.input_var_name + '(' + self.index_string(parameter.input_index, must_copy_in_to_out = True) + ')' if self.specification.must_handle_array: self.out.dedent() self.out.lf() + 'END DO' def output_lines_before_with_clear_out_variables(self): for parameter in self.specification.parameters: spec = self.dtype_to_spec[parameter.datatype] if parameter.is_output(): if parameter.datatype == 'string': self.out.lf() + 'output_characters = "x"' return def output_lines_before_with_clear_input_variables(self): for parameter in self.specification.parameters: spec = self.dtype_to_spec[parameter.datatype] if parameter.is_input(): if parameter.datatype == 'string': self.out.lf() + 'input_characters = "x"' return def output_lines_before_with_inout_variables(self): for parameter in self.specification.parameters: spec = self.dtype_to_spec[parameter.datatype] if parameter.direction == LegacyFunctionSpecification.IN: if parameter.datatype == 'string': self.out.n() + 'input_characters(' self.out + '( (' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ')' self.out + ':' + '(((' + self.index_string(parameter.input_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)' self.out + ') = &' self.out.lf() self.out + 'characters(' self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , '+spec.input_var_name +')' self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')' self.out + ')' if parameter.direction == LegacyFunctionSpecification.INOUT: if parameter.datatype == 'string': self.out.n() + 'output_characters(' self.out + '( (' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')' self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)' self.out + ') = &' self.out.lf() self.out + 'characters(' self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , 
'+spec.input_var_name +')' self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')' self.out + ')' def output_lines_with_number_of_outputs(self): dtype_to_count = {} for parameter in self.specification.output_parameters: count = dtype_to_count.get(parameter.datatype, 0) dtype_to_count[parameter.datatype] = count + 1 if not self.specification.result_type is None: count = dtype_to_count.get(self.specification.result_type, 0) dtype_to_count[self.specification.result_type] = count + 1 for dtype in dtype_to_count: spec = self.dtype_to_spec[dtype] count = dtype_to_count[dtype] self.out.n() + 'header_out(' + spec.counter_name + ') = ' + count + ' * call_count' pass def output_function_end(self): self.out + ' &' self.out.n() + ')' def output_function_start(self): self.out.n() if not self.specification.result_type is None: spec = self.dtype_to_spec[self.specification.result_type] # if self.specification.result_type == 'string': # self.out + 'output_characters(' # self.out + '( (' + self.index_string(0) + ')* ' + self.MAX_STRING_LEN + ')' # self.out + ':' + '(((' + self.index_string(0) + ')+1)*' + self.MAX_STRING_LEN + '-1)' # self.out + ') = &' # self.out.lf() # else: self.out + spec.output_var_name self.out + '(' + self.index_string(0) + ')' + ' = ' else: self.out + 'CALL ' self.out + self.specification.name if self.must_add_underscore_to_function(self.specification): self.out + '_' self.out + '(' def output_casestmt_start(self): self.out + 'CASE(' + self.specification.id + ')' def output_casestmt_end(self): self.out.n() def must_add_underscore_to_function(self, x): for cls in self.underscore_functions_from_specification_classes: if hasattr(cls, x.name): return True return False class GenerateAFortranSourcecodeStringFromASpecificationClass(GenerateASourcecodeStringFromASpecificationClass): MAX_STRING_LEN = 256 @late def dtype_to_spec(self): return dtype_to_spec @late def number_of_types(self): return len(self.dtype_to_spec) @late def length_of_the_header(self): return 2 + self.number_of_types @late def underscore_functions_from_specification_classes(self): return [] def output_sourcecode_for_function(self): result = GenerateAFortranStringOfAFunctionSpecification() result.underscore_functions_from_specification_classes = self.underscore_functions_from_specification_classes return result def output_needs_mpi(self): self.out.lf() + 'logical NEEDS_MPI' if (hasattr(self, 'needs_mpi') and self.needs_mpi) and self.must_generate_mpi: self.out.lf() + 'parameter (NEEDS_MPI=.true.)' else: self.out.lf() + 'parameter (NEEDS_MPI=.false.)' self.out.lf().lf() def start(self): self.use_iso_c_bindings = config.compilers.fc_iso_c_bindings self.out + 'program amuse_worker_program' self.out.indent() self.output_modules() if self.use_iso_c_bindings: self.out.n() + 'use iso_c_binding' self.out.n() + 'implicit none' self.out.n() + CONSTANTS_STRING self.output_needs_mpi() self.output_maximum_constants() if self.must_generate_mpi: self.out.lf().lf() + MODULE_GLOBALS_STRING else: self.out.lf().lf() + NOMPI_MODULE_GLOBALS_STRING if self.use_iso_c_bindings: self.out.n() + ISO_ARRAY_DEFINES_STRING else: self.out.n() + ARRAY_DEFINES_STRING self.out.lf().lf() + MAIN_STRING self.out.lf().lf() + 'CONTAINS' self.out + POLLING_FUNCTIONS_STRING if self.must_generate_mpi: self.out + INTERNAL_FUNCTIONS_STRING if self.use_iso_c_bindings: self.out + RECV_HEADER_SLEEP_STRING else: self.out + RECV_HEADER_WAIT_STRING self.out + RUN_LOOP_MPI_STRING else: self.out + NOMPI_INTERNAL_FUNCTIONS_STRING self.out + 
EMPTY_RUN_LOOP_MPI_STRING if self.use_iso_c_bindings: self.out.n() + RUN_LOOP_SOCKETS_STRING if self.must_generate_mpi: self.out.n() + RUN_LOOP_SOCKETS_MPI_STRING else: self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING else: self.out.n() + EMPTY_RUN_LOOP_SOCKETS_STRING self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING self.output_handle_call() self.out.dedent() self.out.n() + 'end program amuse_worker_program' self._result = self.out.string def output_mpi_include(self): self.out.n() + "INCLUDE 'mpif.h'" def output_modules(self): self.out.n() if hasattr(self.specification_class, 'use_modules'): for x in self.specification_class.use_modules: self.out.n() + 'use ' + x def must_include_declaration_of_function(self, x): if x.specification.name.startswith("internal__"): return False return True def output_declarations_for_the_functions(self): if not hasattr(self.specification_class, 'use_modules'): for x in self.interface_functions: if not self.must_include_declaration_of_function(x): continue specification = x.specification if specification.id == 0: continue if specification.result_type is None: continue if specification.result_type == 'string': type = 'CHARACTER(len=255)' else: spec = self.dtype_to_spec[specification.result_type] type = spec.type self.out.lf() + type + ' :: ' + specification.name if self.must_add_underscore_to_function(x): self.out + '_' def must_add_underscore_to_function(self, x): for cls in self.underscore_functions_from_specification_classes: if hasattr(cls, x.specification.name): return True return False def output_handle_call(self): self.out.lf() + 'integer function handle_call()' self.out.indent().n() self.out.lf() + 'implicit none' self.output_declarations_for_the_functions() self.out.lf() + 'integer i, call_count' self.out.lf() + 'call_count = header_in(HEADER_CALL_COUNT)' self.out.lf() + 'handle_call = 1' self.out.lf() + 'SELECT CASE (header_in(HEADER_FUNCTION_ID))' self.out.indent().n() self.out.lf() + 'CASE(0)' self.out.indent().lf()+'handle_call = 0' self.out.dedent() self.output_sourcecode_for_functions() self.out.lf() + 'CASE DEFAULT' self.out.indent() self.out.lf() + 'header_out(HEADER_STRING_COUNT) = 1' self.out.lf() + 'header_out(HEADER_FLAGS) = IOR(header_out(HEADER_FLAGS), 256) ' self.out.lf() + "strings_out(1) = 'error, illegal function id'" self.out.dedent() self.out.dedent().n() + 'END SELECT' self.out.n() + 'return' self.out.dedent() self.out.n() + 'end function' def output_maximum_constants(self): self.out.lf() + 'integer MAX_INTEGERS_IN, MAX_INTEGERS_OUT, MAX_LONGS_IN, MAX_LONGS_OUT, &' self.out.lf() + 'MAX_FLOATS_IN, MAX_FLOATS_OUT, MAX_DOUBLES_IN,MAX_DOUBLES_OUT, &' self.out.lf() + 'MAX_BOOLEANS_IN,MAX_BOOLEANS_OUT, MAX_STRINGS_IN, MAX_STRINGS_OUT' self.out.lf() for dtype in self.dtype_to_spec.keys(): dtype_spec = self.dtype_to_spec[dtype] maximum = self.mapping_from_dtype_to_maximum_number_of_inputvariables.get(dtype,0) self.out.n() + 'parameter (MAX_' + dtype_spec.input_var_name.upper() + '=' + maximum + ')' maximum =self.mapping_from_dtype_to_maximum_number_of_outputvariables.get(dtype,0) self.out.n() + 'parameter (MAX_' + dtype_spec.output_var_name.upper() + '=' + maximum + ')' class GenerateAFortranStubStringFromASpecificationClass\ (GenerateASourcecodeStringFromASpecificationClass): @late def dtype_to_spec(self): return dtype_to_spec @late def ignore_functions_from_specification_classes(self): return [] @late def underscore_functions_from_specification_classes(self): return [] def output_sourcecode_for_function(self): result = 
create_definition.CreateFortranStub() result.output_definition_only = False return result def start(self): self.output_modules() self.out.lf() self.output_sourcecode_for_functions() self.out.lf() self._result = self.out.string def must_include_interface_function_in_output(self, x): if x.specification.name.startswith("internal__"): return False for cls in self.ignore_functions_from_specification_classes: if hasattr(cls, x.specification.name): return False return True def output_modules(self): self.out.n() if hasattr(self.specification_class, 'use_modules'): for x in self.specification_class.use_modules: self.out.n() + 'use ' + x
38.694596
132
0.623427
7,816
63,730
4.730169
0.042733
0.029158
0.034081
0.038949
0.832896
0.794731
0.752617
0.704579
0.666847
0.645614
0
0.011476
0.284889
63,730
1,646
133
38.718105
0.799754
0.03802
0
0.713276
1
0.007855
0.714309
0.247172
0
0
0
0
0
1
0.029065
false
0.001571
0.007855
0.00707
0.067557
0.020424
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
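Note: the worker source in the record above follows a fixed framing protocol: every request starts with an integer header carrying a call id, a function id, a call count, and one element count per payload type, and each typed array crosses the wire only when its count is nonzero. Below is a minimal client-side sketch of that style of framing, assuming little-endian 32-bit header slots and illustrative slot positions; the real AMUSE channel defines its own header layout and sizes.

import struct

HEADER_SLOTS = 11  # hypothetical layout: flags, call id, function id, call count, 7 type counts

def send_call(sock, call_id, function_id, integers=(), doubles=()):
    # sock is anything with sendall(), e.g. a connected socket.socket
    header = [0] * HEADER_SLOTS
    header[1] = call_id
    header[2] = function_id
    header[3] = 1               # one call in this message
    header[4] = len(integers)   # assumed slot for the integer count
    header[7] = len(doubles)    # assumed slot for the double count
    sock.sendall(struct.pack('<%di' % HEADER_SLOTS, *header))
    # each typed payload follows the header only when its count is nonzero,
    # mirroring the receive/send branches in the worker loop above
    if integers:
        sock.sendall(struct.pack('<%di' % len(integers), *integers))
    if doubles:
        sock.sendall(struct.pack('<%dd' % len(doubles), *doubles))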
1e265b3ae8c065184df7633e84cd6bebc489d53b
3,558
py
Python
tests/integration/test_debug_layout.py
benjamintemitope/SublimeTextXdebug
7b62975aed85f4bc839d908d7a696d1ca2b794d9
[ "MIT" ]
344
2015-01-03T01:55:52.000Z
2022-01-11T08:52:55.000Z
tests/integration/test_debug_layout.py
benjamintemitope/SublimeTextXdebug
7b62975aed85f4bc839d908d7a696d1ca2b794d9
[ "MIT" ]
107
2015-01-05T12:46:39.000Z
2021-03-25T04:56:16.000Z
tests/integration/test_debug_layout.py
benjamintemitope/SublimeTextXdebug
7b62975aed85f4bc839d908d7a696d1ca2b794d9
[ "MIT" ]
82
2015-01-10T16:02:50.000Z
2022-01-18T19:25:58.000Z
try:
    from xdebug.unittesting import XdebugDeferrableTestCase
except:
    from SublimeTextXdebug.xdebug.unittesting import XdebugDeferrableTestCase


class TestDebugLayout(XdebugDeferrableTestCase):
    def window_does_not_have_debug_layout(self):
        breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
        context_view = self.get_view_by_title('Xdebug Context')
        stack_view = self.get_view_by_title('Xdebug Stack')
        watch_view = self.get_view_by_title('Xdebug Watch')
        return not breakpoint_view and not context_view and not stack_view and not watch_view

    def test_debug_layout_remains_open_on_session_stop(self):
        self.set_xdebug_settings({
            'break_on_start': True
        })
        yield self.window_has_xdebug_settings
        self.run_command('xdebug_session_start')
        yield self.window_has_debug_layout
        stack_view = self.get_view_by_title('Xdebug Stack')

        self.send_server_request()

        def stack_has_content():
            return not self.view_is_empty(stack_view)
        yield stack_has_content

        self.run_command('xdebug_session_stop')
        self.assertIsNotNone(self.get_view_by_title('Xdebug Breakpoint'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Context'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Stack'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Watch'))

    def test_debug_layout_is_restored_after_session_stop(self):
        self.set_xdebug_settings({
            'break_on_start': True
        })
        yield self.window_has_xdebug_settings
        self.run_command('xdebug_session_start')
        yield self.window_has_debug_layout
        stack_view = self.get_view_by_title('Xdebug Stack')

        self.send_server_request()

        def stack_has_content():
            return not self.view_is_empty(stack_view)
        yield stack_has_content

        self.run_command('xdebug_session_stop')
        self.assertIsNotNone(self.get_view_by_title('Xdebug Breakpoint'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Context'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Stack'))
        self.assertIsNotNone(self.get_view_by_title('Xdebug Watch'))

        self.run_command('xdebug_layout', {'restore': True})
        yield self.window_does_not_have_debug_layout
        self.assertIsNone(self.get_view_by_title('Xdebug Breakpoint'))
        self.assertIsNone(self.get_view_by_title('Xdebug Context'))
        self.assertIsNone(self.get_view_by_title('Xdebug Stack'))
        self.assertIsNone(self.get_view_by_title('Xdebug Watch'))

    def test_debug_layout_is_closed_on_session_stop(self):
        self.set_xdebug_settings({
            'break_on_start': True,
            'close_on_stop': True
        })
        yield self.window_has_xdebug_settings
        self.run_command('xdebug_session_start')
        yield self.window_has_debug_layout
        stack_view = self.get_view_by_title('Xdebug Stack')

        self.send_server_request()

        def stack_has_content():
            return not self.view_is_empty(stack_view)
        yield stack_has_content

        self.run_command('xdebug_session_stop')
        yield self.window_does_not_have_debug_layout
        self.assertIsNone(self.get_view_by_title('Xdebug Breakpoint'))
        self.assertIsNone(self.get_view_by_title('Xdebug Context'))
        self.assertIsNone(self.get_view_by_title('Xdebug Stack'))
        self.assertIsNone(self.get_view_by_title('Xdebug Watch'))
37.851064
93
0.715571
460
3,558
5.11087
0.121739
0.068482
0.107614
0.12718
0.845598
0.845598
0.845598
0.786899
0.786899
0.770736
0
0
0.201799
3,558
93
94
38.258065
0.827817
0
0
0.771429
0
0
0.141372
0
0
0
0
0
0.228571
1
0.1
false
0
0.028571
0.042857
0.2
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
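Note: the test methods in the record above are generators in the deferrable style used for Sublime Text plugin testing: each yielded callable is a condition that the runner polls until it becomes truthy, so `yield stack_has_content` blocks the test until the stack view fills. A minimal sketch of such a runner follows, assuming a plain time-based poll loop; the actual XdebugDeferrableTestCase machinery runs inside Sublime's event loop.

import time

def run_deferred(test_generator, timeout=4.0, interval=0.05):
    # drive the generator; wait on every yielded condition before resuming
    for condition in test_generator:
        deadline = time.monotonic() + timeout
        while not condition():
            if time.monotonic() > deadline:
                raise TimeoutError('condition %r never became truthy' % condition)
            time.sleep(interval)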
1e4d5dee2dccb4bc591ffa0d5a11cc09f618dd0b
2,367
py
Python
loading.py
Cherepok3-python/loading
e2719dd3d116cf3cded01fdbb152efc11d32e8b6
[ "MIT" ]
null
null
null
loading.py
Cherepok3-python/loading
e2719dd3d116cf3cded01fdbb152efc11d32e8b6
[ "MIT" ]
null
null
null
loading.py
Cherepok3-python/loading
e2719dd3d116cf3cded01fdbb152efc11d32e8b6
[ "MIT" ]
null
null
null
import time
import sys

# twelve passes over the four spinner frames, 0.1 s per frame; the original
# file wrote this identical sequence fully unrolled as 96 write/sleep statements
for _ in range(12):
    for frame in '|/-\\':
        sys.stdout.write('\rloading ' + frame)
        time.sleep(0.1)
sys.stdout.write('\rDone!')
23.67
32
0.685256
392
2,367
4.137755
0.02551
0.271887
0.422935
0.651048
0.985203
0.985203
0.985203
0.985203
0.985203
0.985203
0
0.043282
0.062949
2,367
100
33
23.67
0.688007
0
0
0.969697
0
0
0.230997
0
0
0
0
0
0
1
0
true
0
0.020202
0
0.020202
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
11
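Note: the loading animation above fixes the number of passes in advance. The open-ended variant of the same idea cycles over the frames until a deadline and flushes after each write so the frame actually appears on line-buffered terminals; the duration argument here is an illustrative addition, not part of the original script.

import itertools
import sys
import time

def spin(seconds, message='loading'):
    # cycle through the classic four frames until the deadline passes
    deadline = time.monotonic() + seconds
    for frame in itertools.cycle('|/-\\'):
        if time.monotonic() > deadline:
            break
        sys.stdout.write('\r%s %s' % (message, frame))
        sys.stdout.flush()  # without flush, buffered output may appear only at the end
        time.sleep(0.1)
    sys.stdout.write('\rDone!\n')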
1ec98e5639493d631e014d9de4bd49e85d1da890
140
py
Python
jmu_gradescope_utils/__init__.py
JMU-CS/jmu_python_gradescope_utils
842d6bdb7d6271eaffbf589942b7dd18e6f91f41
[ "MIT" ]
null
null
null
jmu_gradescope_utils/__init__.py
JMU-CS/jmu_python_gradescope_utils
842d6bdb7d6271eaffbf589942b7dd18e6f91f41
[ "MIT" ]
1
2022-01-11T13:22:08.000Z
2022-01-11T14:25:06.000Z
jmu_gradescope_utils/__init__.py
JMU-CS/jmu_python_gradescope_utils
842d6bdb7d6271eaffbf589942b7dd18e6f91f41
[ "MIT" ]
null
null
null
from jmu_gradescope_utils.jmu_test_case import *
from jmu_gradescope_utils.utils import *
from jmu_gradescope_utils.coverage_utils import *
35
49
0.871429
21
140
5.380952
0.380952
0.185841
0.451327
0.584071
0.495575
0
0
0
0
0
0
0
0.085714
140
3
50
46.666667
0.882813
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
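Note: for this three-line package initializer, qsc_code_frac_lines_import_quality_signal is reported as 1, which is consistent with every line being an import. The exact definition the dataset uses is not spelled out here, so the following is only a guessed reconstruction of such a signal.

def frac_lines_import(source):
    # assumed definition: fraction of non-blank lines that are import statements
    lines = [line.strip() for line in source.splitlines() if line.strip()]
    if not lines:
        return 0.0
    hits = sum(1 for line in lines if line.startswith(('import ', 'from ')))
    return hits / len(lines)

example = (
    'from jmu_gradescope_utils.jmu_test_case import *\n'
    'from jmu_gradescope_utils.utils import *\n'
    'from jmu_gradescope_utils.coverage_utils import *\n'
)
assert frac_lines_import(example) == 1.0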
1ee59ea523e79c9d0668d2d7685ab257ffa40525
2,325
py
Python
ds/numpy.py
amresht/hackerrank
184502d275654fde68b93c3a4fbb341bbf1f81ca
[ "MIT" ]
null
null
null
ds/numpy.py
amresht/hackerrank
184502d275654fde68b93c3a4fbb341bbf1f81ca
[ "MIT" ]
null
null
null
ds/numpy.py
amresht/hackerrank
184502d275654fde68b93c3a4fbb341bbf1f81ca
[ "MIT" ]
null
null
null
{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'null' is not defined", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m<ipython-input-2-d4cdadb62aa7>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[1;32mG:\\GITHUB\\hackerrank\\ds\\numpy.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m {\n\u001b[0;32m 26\u001b[0m \u001b[1;34m\"cell_type\"\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;34m\"code\"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[1;34m\"execution_count\"\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mnull\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[1;34m\"metadata\"\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;33m{\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;34m\"outputs\"\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;31mNameError\u001b[0m: name 'null' is not defined" ] } ], "source": [ "import numpy\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" } }, "nbformat": 4, "nbformat_minor": 4 }
42.272727
957
0.594839
355
2,325
3.856338
0.225352
0.245435
0.315559
0.265888
0.609204
0.450694
0.427319
0.427319
0.406866
0.385683
0
0.251266
0.150968
2,325
54
958
43.055556
0.442249
0
0
0.111111
0
0.12963
0.777634
0.466237
0
0
0
0
0
1
0
true
0
0.037037
0
0.037037
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
8
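Note: the content above is not Python at all; it is a Jupyter notebook (nbformat 4 JSON) that was saved with a .py extension, which is why the captured traceback shows `import numpy` resolving to the file itself and failing on the bare JSON `null`. Code cells in such a file can be recovered by parsing it as JSON instead of importing it; a small sketch follows, with the path taken from the traceback purely as an example.

import json

def extract_code_cells(path):
    # an .ipynb file is JSON; pull the source out of every code cell
    with open(path, encoding='utf-8') as f:
        notebook = json.load(f)
    return [''.join(cell['source'])
            for cell in notebook['cells']
            if cell['cell_type'] == 'code']

# e.g. extract_code_cells('ds/numpy.py')[0] would return "import numpy\n"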
94dc3b3557bcf2cdeb2b6fb7bde083db56feba53
12,436
py
Python
test/test_cii.py
bepec/pydvbcss
72bec02d42582416390ea0379dc6b79da8cd0721
[ "Apache-2.0" ]
22
2015-03-15T17:24:47.000Z
2021-12-23T01:42:24.000Z
test/test_cii.py
bepec/pydvbcss
72bec02d42582416390ea0379dc6b79da8cd0721
[ "Apache-2.0" ]
15
2016-02-21T20:05:03.000Z
2021-01-11T12:19:18.000Z
test/test_cii.py
bepec/pydvbcss
72bec02d42582416390ea0379dc6b79da8cd0721
[ "Apache-2.0" ]
6
2015-03-30T11:41:20.000Z
2020-12-16T11:16:00.000Z
#!/usr/bin/env python # # Copyright 2015 British Broadcasting Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import _useDvbCssUninstalled # Enable to run when dvbcss not yet installed ... @UnusedImport from dvbcss.protocol.cii import CII from dvbcss.protocol.cii import TimelineOption from dvbcss.protocol.transformers import OMIT import json class Test_CII(unittest.TestCase): def test_create_empty(self): c=CII() self.assertEquals(OMIT, c.protocolVersion) self.assertEquals(OMIT, c.contentId) self.assertEquals(OMIT, c.contentIdStatus) self.assertEquals(OMIT, c.presentationStatus) self.assertEquals(OMIT, c.mrsUrl) self.assertEquals(OMIT, c.wcUrl) self.assertEquals(OMIT, c.tsUrl) self.assertEquals(OMIT, c.teUrl) self.assertEquals(OMIT, c.timelines) self.assertEquals(OMIT, c.private) def test_pack_empty_message(self): c=CII() msg=c.pack() j=json.loads(msg) self.assertEquals(j,{}) def test_unpack_empty_message(self): msg="{}" c=CII.unpack(msg) self.assertEquals(OMIT, c.protocolVersion) self.assertEquals(OMIT, c.contentId) self.assertEquals(OMIT, c.contentIdStatus) self.assertEquals(OMIT, c.presentationStatus) self.assertEquals(OMIT, c.mrsUrl) self.assertEquals(OMIT, c.wcUrl) self.assertEquals(OMIT, c.tsUrl) self.assertEquals(OMIT, c.teUrl) self.assertEquals(OMIT, c.timelines) self.assertEquals(OMIT, c.private) def test_unpack_ignore_unknown_fields(self): msg="""{ "flurble" : 5 }""" c=CII.unpack(msg) self.assertEquals(OMIT, c.protocolVersion) self.assertEquals(OMIT, c.contentId) self.assertEquals(OMIT, c.contentIdStatus) self.assertEquals(OMIT, c.presentationStatus) self.assertEquals(OMIT, c.mrsUrl) self.assertEquals(OMIT, c.wcUrl) self.assertEquals(OMIT, c.tsUrl) self.assertEquals(OMIT, c.teUrl) self.assertEquals(OMIT, c.timelines) self.assertEquals(OMIT, c.private) def test_pack_unpack_protocolVersion(self): c=CII(protocolVersion="1.1") self.assertEquals(c.protocolVersion, "1.1") msg=c.pack() j=json.loads(msg) self.assertEquals(j["protocolVersion"], "1.1") self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals("1.1", c.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_unpack_contentId(self): VALUE="dvb://a.b.c.d" c=CII(contentId=VALUE) self.assertEquals(c.contentId, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["contentId"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(VALUE, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, 
d.private) def test_pack_unpack_contentIdStatus(self): for VALUE in ["partial","final"]: c=CII(contentIdStatus=VALUE) self.assertEquals(c.contentIdStatus, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["contentIdStatus"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(VALUE, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_unpack_presentationStatus(self): STATUSES=[ ( ["okay"], "okay" ), ( ["transitioning"], "transitioning" ), ( ["fault"], "fault" ), ( ["other"], "other" ), ( ["okay", "sub"], "okay sub" ), ( ["transitioning", "sub1", "sub2"], "transitioning sub1 sub2" ), ] for (VALUE, ENCODED) in STATUSES: c=CII(presentationStatus=VALUE) self.assertEquals(c.presentationStatus, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["presentationStatus"], ENCODED) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(VALUE, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_presentationStatus_not_a_string(self): c=CII(presentationStatus="okay") with self.assertRaises(ValueError): c.pack() def test_pack_unpack_mrsUrl(self): VALUE="http://blah.com" c=CII(mrsUrl=VALUE) self.assertEquals(c.mrsUrl, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["mrsUrl"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(VALUE, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_unpack_wcUrl(self): VALUE="udp://1.2.3.4:1234" c=CII(wcUrl=VALUE) self.assertEquals(c.wcUrl, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["wcUrl"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(VALUE, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_unpack_tsUrl(self): VALUE="ws://1.2.3.4:5678/blah/" c=CII(tsUrl=VALUE) self.assertEquals(c.tsUrl, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["tsUrl"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(VALUE, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def 
test_pack_unpack_teUrl(self): VALUE="ws://1.2.3.4:5678/seilgr" c=CII(teUrl=VALUE) self.assertEquals(c.teUrl, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["teUrl"], VALUE) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(VALUE, d.teUrl) self.assertEquals(OMIT, d.timelines) self.assertEquals(OMIT, d.private) def test_pack_unpack_timelines(self): TIMELINES=[ ( [], [] ), ( [ TimelineOption("urn:dvb:css:timeline:pts", 1, 1000, 0.2, OMIT) ], [ { "timelineSelector" : "urn:dvb:css:timeline:pts", "timelineProperties" : { "unitsPerTick" : 1, "unitsPerSecond" : 1000, "accuracy" : 0.2 } } ] ), ( [ TimelineOption("urn:dvb:css:timeline:pts", 1, 1000, 0.2, OMIT), TimelineOption("urn:dvb:css:timeline:temi:1:5", 1001, 30000, OMIT, []), TimelineOption("urn:dvb:css:timeline:temi:1:6", 1, 25, OMIT, [{'type':'blah','abc':5},{'type':'bbc','pqr':None}]), ], [ { "timelineSelector" : "urn:dvb:css:timeline:pts", "timelineProperties" : { "unitsPerTick" : 1, "unitsPerSecond" : 1000, "accuracy" : 0.2 } }, { "timelineSelector" : "urn:dvb:css:timeline:temi:1:5", "timelineProperties" : { "unitsPerTick" : 1001, "unitsPerSecond" : 30000 }, "private" : [] }, { "timelineSelector" : "urn:dvb:css:timeline:temi:1:6", "timelineProperties" : { "unitsPerTick" : 1, "unitsPerSecond" : 25, }, "private" : [{'type':'blah','abc':5},{'type':'bbc','pqr':None}] } ] ), ] for (VALUE, ENCODED) in TIMELINES: c=CII(timelines=VALUE) self.assertEquals(c.timelines, VALUE) msg=c.pack() j=json.loads(msg) self.assertEquals(j["timelines"], ENCODED) self.assertEquals(len(j.keys()), 1) d=CII.unpack(msg) self.assertEquals(OMIT, d.protocolVersion) self.assertEquals(OMIT, d.contentId) self.assertEquals(OMIT, d.contentIdStatus) self.assertEquals(OMIT, d.presentationStatus) self.assertEquals(OMIT, d.mrsUrl) self.assertEquals(OMIT, d.wcUrl) self.assertEquals(OMIT, d.tsUrl) self.assertEquals(OMIT, d.teUrl) self.assertEquals(VALUE, d.timelines) self.assertEquals(OMIT, d.private) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
37.233533
130
0.58017
1,333
12,436
5.372093
0.128282
0.33068
0.310013
0.237537
0.731602
0.716101
0.716101
0.699623
0.676442
0.676442
0
0.013268
0.29696
12,436
333
131
37.345345
0.805787
0.056127
0
0.613139
0
0
0.074396
0.022097
0
0
0
0
0.543796
1
0.051095
false
0
0.021898
0
0.076642
0
0
0
0
null
1
1
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
7
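Note: the tests above revolve around the OMIT sentinel: a CII field set to OMIT is left out of the packed JSON entirely, and fields absent from an incoming message come back as OMIT after unpack. Below is a stripped-down sketch of that round-trip pattern, with the field set reduced to two names for brevity; the real CII class covers the full protocol.

import json

OMIT = object()  # unique sentinel meaning "leave this field out of the message"

class Message:
    FIELDS = ('contentId', 'presentationStatus')

    def __init__(self, **kwargs):
        for name in self.FIELDS:
            setattr(self, name, kwargs.get(name, OMIT))

    def pack(self):
        # only non-OMIT fields appear in the JSON text
        return json.dumps({name: getattr(self, name)
                           for name in self.FIELDS
                           if getattr(self, name) is not OMIT})

    @classmethod
    def unpack(cls, text):
        return cls(**json.loads(text))

assert Message().pack() == '{}'
assert Message.unpack('{"contentId": "dvb://a.b"}').presentationStatus is OMIT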
bf4a453b867ccfa9b993bfae82a8a7628c418600
1,233
py
Python
aaaaaaaaaaaaaaaaaaaaaaaaaaaaa.py
miemie2013/ppgan
48008d85ec6c5fa2e1469acf8507b2614fa550cc
[ "Apache-2.0" ]
null
null
null
aaaaaaaaaaaaaaaaaaaaaaaaaaaaa.py
miemie2013/ppgan
48008d85ec6c5fa2e1469acf8507b2614fa550cc
[ "Apache-2.0" ]
null
null
null
aaaaaaaaaaaaaaaaaaaaaaaaaaaaa.py
miemie2013/ppgan
48008d85ec6c5fa2e1469acf8507b2614fa550cc
[ "Apache-2.0" ]
1
2022-01-19T03:01:13.000Z
2022-01-19T03:01:13.000Z
import torch
import numpy as np

ckpt_file = 'D_00.pth'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
model_dic1 = {}
for key, value in state_dict.items():
    model_dic1[key] = value.data.numpy()

ckpt_file = 'D_19.pth'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
model_dic2 = {}
for key, value in state_dict.items():
    model_dic2[key] = value.data.numpy()

for key, value in model_dic1.items():
    value2 = model_dic2[key]
    ddd = np.sum((value - value2) ** 2)
    if ddd > 110.00001:
        print()
        print('ddd=%.6f' % ddd)

print('==============================================')
print()

ckpt_file = 'G_00.pth'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
model_dic1 = {}
for key, value in state_dict.items():
    model_dic1[key] = value.data.numpy()

ckpt_file = 'G_19.pth'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
model_dic2 = {}
for key, value in state_dict.items():
    model_dic2[key] = value.data.numpy()

for key, value in model_dic1.items():
    value2 = model_dic2[key]
    ddd = np.sum((value - value2) ** 2)
    if ddd > 0.00001:
        print()
        print('ddd=%.6f' % ddd)

print()
17.366197
68
0.628548
185
1,233
3.994595
0.205405
0.108254
0.08931
0.105548
0.925575
0.925575
0.925575
0.849797
0.849797
0.849797
0
0.041625
0.181671
1,233
70
69
17.614286
0.690783
0
0
0.756757
0
0
0.086743
0.037643
0
0
0
0
0
1
0
false
0
0.054054
0
0.054054
0.189189
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
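Note: the script above measures checkpoint drift as the per-key sum of squared differences between two state dicts. The same check folds naturally into a reusable helper; this sketch assumes, as the original does, that both checkpoints contain exactly the same keys.

import numpy as np
import torch

def changed_keys(path_a, path_b, threshold=1e-5):
    # load both checkpoints on CPU and report keys whose tensors differ
    a = torch.load(path_a, map_location=torch.device('cpu'))
    b = torch.load(path_b, map_location=torch.device('cpu'))
    report = {}
    for key, value in a.items():
        diff = np.sum((value.data.numpy() - b[key].data.numpy()) ** 2)
        if diff > threshold:
            report[key] = float(diff)
    return report

# e.g. changed_keys('G_00.pth', 'G_19.pth') maps each drifted key to its squared difference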
bfa2059168ae8eea3df225d37903fa106d886b15
13,870
py
Python
dataset/global_sampler.py
Jeff-Zilence/TransGeo2022
f6e652237a29bad1167699905398042af86a0b0a
[ "MIT" ]
1
2022-03-22T23:03:04.000Z
2022-03-22T23:03:04.000Z
dataset/global_sampler.py
Jeff-Zilence/TransGeo2022
f6e652237a29bad1167699905398042af86a0b0a
[ "MIT" ]
null
null
null
dataset/global_sampler.py
Jeff-Zilence/TransGeo2022
f6e652237a29bad1167699905398042af86a0b0a
[ "MIT" ]
null
null
null
import math from typing import TypeVar, Optional, Iterator import os import torch from torch.utils.data.distributed import DistributedSampler from torch.utils.data.dataset import Dataset import torch.distributed as dist import numpy as np import random T_co = TypeVar('T_co', covariant=True) # This sampler is implemented for multi-GPU training, modified from the DistributedSampler of pytorch. # The strategy follows VIGOR class DistributedMiningSampler(DistributedSampler[T_co]): def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None, rank: Optional[int] = None, shuffle: bool = False, seed: int = 0, drop_last: bool = False, batch_size: int = 128, mode = 'similarity', dim=1000, save_path = None) -> None: super(DistributedMiningSampler, self).__init__(dataset, num_replicas, rank, shuffle, seed, drop_last) self.dim = dim self.batch_size = batch_size * self.num_replicas self.queue_length = len(self.dataset) self.current_size = len(self.dataset) // self.batch_size * self.batch_size self.current_indices = np.arange(self.current_size) self.queue_size = 1 # for computing moving average, not used in this implementation self.queue = np.zeros([self.queue_length, self.queue_size, self.dim, 2]) self.queue_ptr = 0 self.queue_counter = np.zeros(self.queue_length,dtype=np.int) self.save_path = save_path self.mining_start = 1 self.mining_pool_size = min(40000, self.queue_length) self.mining_save_size = 100 self.choice_pool = range(self.mining_save_size) self.mining_save = np.zeros([self.queue_length, self.mining_save_size],dtype=int) self.mode = mode def update(self, data_sat, data_grd, indexes): data_sat_norm = data_sat / np.linalg.norm(data_sat, axis=1, keepdims=True) data_grd_norm = data_grd / np.linalg.norm(data_grd, axis=1, keepdims=True) batch_size = data_sat.shape[0] # writing in distributed training style, complicated. Update the queue according to the previous index. 
for j in range(self.num_replicas): index_j = self.indices_out[j:self.current_size:self.num_replicas] for i in range(batch_size // self.num_replicas): index = index_j[self.queue_ptr + i] assert index == indexes[i + j * (batch_size // self.num_replicas)] self.queue[index, self.queue_counter[index] % self.queue_size, :, 0] = \ data_sat_norm[i + j * (batch_size // self.num_replicas)] self.queue[index, self.queue_counter[index] % self.queue_size, :, 1] = \ data_grd_norm[i + j * (batch_size // self.num_replicas)] self.queue_counter[index] += 1 self.queue_ptr = (self.queue_ptr + batch_size // self.num_replicas) def generate_indices_sim(self): self.queue_ptr = 0 random.seed(7 + self.epoch) self.current_indices = np.arange(self.current_size) random.shuffle(self.current_indices) if self.epoch >= self.mining_start: assert self.mining_pool_size <= self.queue_length mining_pool = np.array(random.sample(range(self.queue_length), self.mining_pool_size),dtype=int) product_train = np.matmul(self.queue[:,:,:,1].mean(axis=1), np.transpose(self.queue[mining_pool,:,:,0].mean(axis=1))) product_index = np.argsort(product_train, axis=1) ranking = np.zeros(product_train.shape[0]) # update mining pool for i in range(product_train.shape[0]): ranking[i] = product_train.shape[1] - 1 - np.where(mining_pool[product_index[i]] == i)[0] self.mining_save[i, :] = mining_pool[product_index[i, -self.mining_save_size:]] # randomly sample first half ori_list = self.current_indices[:self.current_size//2] self.current_indices = [] # global hard mining for the other half for i in range(self.current_size//self.batch_size): index_s = i * (self.batch_size//2) index_e = index_s + min(self.batch_size//2, self.current_size//2 - index_s) self.current_indices.extend(ori_list[index_s:index_e]) hard_list = [] for j in range(index_s, index_e): idx = random.choice(self.mining_save[ori_list[j]]) # random sampling until no overlap in the batch while idx in ori_list[index_s:index_e] or idx in hard_list: idx = random.choice(self.mining_save[ori_list[j]]) hard_list.append(idx) self.current_indices.extend(hard_list) self.current_indices = np.array(self.current_indices, dtype=int) assert len(self.current_indices) == self.current_size print('sampler updated!') def update_epoch(self): self.generate_indices_sim() if self.rank == 0: np.save(os.path.join(self.save_path,'queue.npy'), self.queue) np.save(os.path.join(self.save_path,'queue_counter.npy'), self.queue_counter) def load(self, path): self.queue_counter = np.load(os.path.join(path,'queue_counter.npy')) self.queue = np.load(os.path.join(path,'queue.npy')) def __iter__(self) -> Iterator[T_co]: if self.shuffle: # deterministically shuffle based on epoch and seed g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = torch.randperm(len(self.current_indices), generator=g).tolist() # type: ignore else: indices = list(range(len(self.current_indices))) # type: ignore if not self.drop_last: # add extra samples to make it evenly divisible padding_size = self.current_size - len(indices) if padding_size <= len(indices): indices += indices[:padding_size] else: indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size] else: # remove tail of data to make it evenly divisible. 
indices = indices[:self.current_size] assert len(indices) == self.current_size # subsample self.indices_out = self.current_indices[indices].tolist() indices = indices[self.rank:self.current_size:self.num_replicas] # assert len(indices) == self.num_samples # print(indices) indices_out = self.current_indices[indices].tolist() # print(self.rank, len(indices), len(indices_out)) return iter(indices_out) class DistributedMiningSamplerVigor(DistributedSampler[T_co]): def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None, rank: Optional[int] = None, shuffle: bool = False, seed: int = 0, drop_last: bool = False, batch_size: int = 128, mode = 'similarity', dim=1000, save_path=None) -> None: super(DistributedMiningSamplerVigor, self).__init__(dataset, num_replicas, rank, shuffle, seed, drop_last) self.dim = dim self.batch_size = batch_size * self.num_replicas self.queue_length = max(dataset.train_data_size, len(dataset.train_sat_cover_list)) self.current_size = len(self.dataset) // self.batch_size * self.batch_size self.current_indices = np.arange(self.current_size) self.queue_size = 1 # for computing moving average, not used in this implementation self.queue = np.zeros([self.queue_length, self.queue_size, self.dim, 2]) self.queue_ptr = 0 self.queue_counter = np.zeros(self.queue_length,dtype=np.int) self.save_path = save_path self.mining_start = 1 self.mining_pool_size = min(40000, len(dataset.train_sat_cover_list)) self.mining_save_size = 100 self.choice_pool = range(self.mining_save_size) self.mining_save = np.zeros([self.queue_length, self.mining_save_size],dtype=int) self.mode = mode # raise Exception def update(self, data_sat, data_grd, indexes): data_sat_norm = data_sat / np.linalg.norm(data_sat, axis=1, keepdims=True) data_grd_norm = data_grd / np.linalg.norm(data_grd, axis=1, keepdims=True) batch_size = data_sat.shape[0] # writing in distributed training style, complicated. Update the queue according to the previous index. 
for j in range(self.num_replicas): index_j = self.indices_out[j:self.current_size:self.num_replicas] for i in range(batch_size // self.num_replicas): index = index_j[self.queue_ptr + i] %len(self.dataset.train_sat_cover_list) assert indexes[i + j * (batch_size // self.num_replicas)] in self.dataset.train_sat_cover_dict[self.dataset.train_sat_cover_list[index]] self.queue[index, self.queue_counter[index] % self.queue_size, :, 0] = \ data_sat_norm[i + j * (batch_size // self.num_replicas)] self.queue[indexes[i + j * (batch_size // self.num_replicas)], self.queue_counter[index] % self.queue_size, :, 1] = \ data_grd_norm[i + j * (batch_size // self.num_replicas)] self.queue_counter[index] += 1 self.queue_ptr = (self.queue_ptr + batch_size // self.num_replicas) def generate_indices_sim(self): self.queue_ptr = 0 random.seed(7 + self.epoch) self.current_indices = np.arange(self.current_size) %len(self.dataset.train_sat_cover_list) random.shuffle(self.current_indices) if self.epoch >= self.mining_start: assert self.mining_pool_size <= self.queue_length mining_pool = np.array(random.sample(range(len(self.dataset.train_sat_cover_list)), self.mining_pool_size),dtype=int) product_train = np.matmul(self.queue[:,:,:,1].mean(axis=1), np.transpose(self.queue[mining_pool,:,:,0].mean(axis=1))) product_index = np.argsort(product_train, axis=1) # update mining pool for i in range(product_train.shape[0]): self.mining_save[i, :] = mining_pool[product_index[i, -self.mining_save_size:]] # randomly sample the first half ori_list = self.current_indices[:self.current_size//2] self.current_indices = [] # global hard mining for the other half for i in range(self.current_size//self.batch_size): index_s = i * (self.batch_size//2) index_e = index_s + min(self.batch_size//2, self.current_size//2 - index_s) self.current_indices.extend(ori_list[index_s:index_e]) hard_list = [] for j in range(index_s, index_e): grd_id = random.choice(self.dataset.train_sat_cover_dict[self.dataset.train_sat_cover_list[ori_list[j]]]) idx = int(random.choice(self.mining_save[grd_id])) # keep random sampling until there is no overlap in the batch, hard coded as VIGOR is complicated while True: flag = False for grd_idx in self.dataset.train_sat_cover_dict[self.dataset.train_sat_cover_list[idx]]: if not self.dataset.check_overlap(ori_list[index_s:index_e], grd_idx) or not self.dataset.check_overlap(hard_list, grd_idx): flag = True if flag: idx = random.choice(self.mining_save[grd_id]) else: break hard_list.append(idx) self.current_indices.extend(hard_list) self.current_indices = np.array(self.current_indices, dtype=int) assert len(self.current_indices) == self.current_size print('sampler updated!') def update_epoch(self): # if self.epoch >= self.mining_start: self.generate_indices_sim() if self.rank == 0: np.save(os.path.join(self.save_path, 'queue.npy'), self.queue) np.save(os.path.join(self.save_path, 'queue_counter.npy'), self.queue_counter) def load(self, path): self.mining_start = 0 self.queue_counter = np.load(os.path.join(path, 'queue_counter.npy')) self.queue = np.load(os.path.join(path, 'queue.npy')) def __iter__(self) -> Iterator[T_co]: if self.shuffle: # deterministically shuffle based on epoch and seed g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = torch.randperm(len(self.current_indices), generator=g).tolist() # type: ignore else: indices = list(range(len(self.current_indices))) # type: ignore if not self.drop_last: # add extra samples to make it evenly divisible padding_size = self.current_size - len(indices) 
if padding_size <= len(indices): indices += indices[:padding_size] else: indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size] else: # remove tail of data to make it evenly divisible. indices = indices[:self.current_size] assert len(indices) == self.current_size # subsample self.indices_out = self.current_indices[indices].tolist() indices = indices[self.rank:self.current_size:self.num_replicas] # assert len(indices) == self.num_samples # print(indices) indices_out = self.current_indices[indices].tolist() # print(self.rank, len(indices), len(indices_out)) return iter(indices_out)
52.938931
152
0.630281
1,835
13,870
4.541689
0.103542
0.060475
0.060475
0.038757
0.901248
0.885049
0.87701
0.850132
0.846172
0.837293
0
0.008119
0.262942
13,870
262
153
52.938931
0.807102
0.100288
0
0.796117
0
0
0.012857
0
0
0
0
0
0.038835
1
0.058252
false
0
0.043689
0
0.121359
0.009709
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bfa2bfed4f8d0c2fbaf2d3a4d2cff7c849d0f626
30
py
Python
tests/t41.py
jplevyak/pyc
9f4bc49be78ba29427841460945ce63826fcd857
[ "BSD-3-Clause" ]
3
2019-08-21T22:01:35.000Z
2021-07-25T00:21:28.000Z
tests/t41.py
jplevyak/pyc
9f4bc49be78ba29427841460945ce63826fcd857
[ "BSD-3-Clause" ]
null
null
null
tests/t41.py
jplevyak/pyc
9f4bc49be78ba29427841460945ce63826fcd857
[ "BSD-3-Clause" ]
null
null
null
x = [1] + [2,3,4,5,6]
print x
10
21
0.433333
9
30
1.444444
0.888889
0
0
0
0
0
0
0
0
0
0
0.26087
0.233333
30
2
22
15
0.304348
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.5
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
8
44a58d1217417e349e3a1a158b8541d56f8debff
28
py
Python
test.py
LeisureGuardian/LG-API
103137d4ce1f71e5d9beaf96cbd0fc47fcd575c2
[ "MIT" ]
null
null
null
test.py
LeisureGuardian/LG-API
103137d4ce1f71e5d9beaf96cbd0fc47fcd575c2
[ "MIT" ]
null
null
null
test.py
LeisureGuardian/LG-API
103137d4ce1f71e5d9beaf96cbd0fc47fcd575c2
[ "MIT" ]
null
null
null
print(134.39457 == 134.395)
14
27
0.678571
5
28
3.8
0.8
0
0
0
0
0
0
0
0
0
0
0.56
0.107143
28
1
28
28
0.2
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
7
44b1b5224c415296885b3ec22539f82a9b000ea3
37,627
py
Python
sdk/python/pulumi_gcp/compute/target_https_proxy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
121
2018-06-18T19:16:42.000Z
2022-03-31T06:06:48.000Z
sdk/python/pulumi_gcp/compute/target_https_proxy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
492
2018-06-22T19:41:03.000Z
2022-03-31T15:33:53.000Z
sdk/python/pulumi_gcp/compute/target_https_proxy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
43
2018-06-19T01:43:13.000Z
2022-03-23T22:43:37.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['TargetHttpsProxyArgs', 'TargetHttpsProxy'] @pulumi.input_type class TargetHttpsProxyArgs: def __init__(__self__, *, ssl_certificates: pulumi.Input[Sequence[pulumi.Input[str]]], url_map: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, proxy_bind: Optional[pulumi.Input[bool]] = None, quic_override: Optional[pulumi.Input[str]] = None, ssl_policy: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a TargetHttpsProxy resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. :param pulumi.Input[str] url_map: A reference to the UrlMap resource that defines the mapping from URL to the BackendService. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[bool] proxy_bind: This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[str] quic_override: Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. :param pulumi.Input[str] ssl_policy: A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. """ pulumi.set(__self__, "ssl_certificates", ssl_certificates) pulumi.set(__self__, "url_map", url_map) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if proxy_bind is not None: pulumi.set(__self__, "proxy_bind", proxy_bind) if quic_override is not None: pulumi.set(__self__, "quic_override", quic_override) if ssl_policy is not None: pulumi.set(__self__, "ssl_policy", ssl_policy) @property @pulumi.getter(name="sslCertificates") def ssl_certificates(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: """ A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. 
""" return pulumi.get(self, "ssl_certificates") @ssl_certificates.setter def ssl_certificates(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "ssl_certificates", value) @property @pulumi.getter(name="urlMap") def url_map(self) -> pulumi.Input[str]: """ A reference to the UrlMap resource that defines the mapping from URL to the BackendService. """ return pulumi.get(self, "url_map") @url_map.setter def url_map(self, value: pulumi.Input[str]): pulumi.set(self, "url_map", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="proxyBind") def proxy_bind(self) -> Optional[pulumi.Input[bool]]: """ This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "proxy_bind") @proxy_bind.setter def proxy_bind(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "proxy_bind", value) @property @pulumi.getter(name="quicOverride") def quic_override(self) -> Optional[pulumi.Input[str]]: """ Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. """ return pulumi.get(self, "quic_override") @quic_override.setter def quic_override(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "quic_override", value) @property @pulumi.getter(name="sslPolicy") def ssl_policy(self) -> Optional[pulumi.Input[str]]: """ A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. 
""" return pulumi.get(self, "ssl_policy") @ssl_policy.setter def ssl_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ssl_policy", value) @pulumi.input_type class _TargetHttpsProxyState: def __init__(__self__, *, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, proxy_bind: Optional[pulumi.Input[bool]] = None, proxy_id: Optional[pulumi.Input[int]] = None, quic_override: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, ssl_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ssl_policy: Optional[pulumi.Input[str]] = None, url_map: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering TargetHttpsProxy resources. :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[bool] proxy_bind: This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[int] proxy_id: The unique identifier for the resource. :param pulumi.Input[str] quic_override: Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. :param pulumi.Input[str] ssl_policy: A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. :param pulumi.Input[str] url_map: A reference to the UrlMap resource that defines the mapping from URL to the BackendService. 
""" if creation_timestamp is not None: pulumi.set(__self__, "creation_timestamp", creation_timestamp) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if proxy_bind is not None: pulumi.set(__self__, "proxy_bind", proxy_bind) if proxy_id is not None: pulumi.set(__self__, "proxy_id", proxy_id) if quic_override is not None: pulumi.set(__self__, "quic_override", quic_override) if self_link is not None: pulumi.set(__self__, "self_link", self_link) if ssl_certificates is not None: pulumi.set(__self__, "ssl_certificates", ssl_certificates) if ssl_policy is not None: pulumi.set(__self__, "ssl_policy", ssl_policy) if url_map is not None: pulumi.set(__self__, "url_map", url_map) @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> Optional[pulumi.Input[str]]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @creation_timestamp.setter def creation_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "creation_timestamp", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="proxyBind") def proxy_bind(self) -> Optional[pulumi.Input[bool]]: """ This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "proxy_bind") @proxy_bind.setter def proxy_bind(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "proxy_bind", value) @property @pulumi.getter(name="proxyId") def proxy_id(self) -> Optional[pulumi.Input[int]]: """ The unique identifier for the resource. """ return pulumi.get(self, "proxy_id") @proxy_id.setter def proxy_id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "proxy_id", value) @property @pulumi.getter(name="quicOverride") def quic_override(self) -> Optional[pulumi.Input[str]]: """ Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. 
Possible values are `NONE`, `ENABLE`, and `DISABLE`. """ return pulumi.get(self, "quic_override") @quic_override.setter def quic_override(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "quic_override", value) @property @pulumi.getter(name="selfLink") def self_link(self) -> Optional[pulumi.Input[str]]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @self_link.setter def self_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link", value) @property @pulumi.getter(name="sslCertificates") def ssl_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. """ return pulumi.get(self, "ssl_certificates") @ssl_certificates.setter def ssl_certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ssl_certificates", value) @property @pulumi.getter(name="sslPolicy") def ssl_policy(self) -> Optional[pulumi.Input[str]]: """ A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. """ return pulumi.get(self, "ssl_policy") @ssl_policy.setter def ssl_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ssl_policy", value) @property @pulumi.getter(name="urlMap") def url_map(self) -> Optional[pulumi.Input[str]]: """ A reference to the UrlMap resource that defines the mapping from URL to the BackendService. """ return pulumi.get(self, "url_map") @url_map.setter def url_map(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_map", value) class TargetHttpsProxy(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, proxy_bind: Optional[pulumi.Input[bool]] = None, quic_override: Optional[pulumi.Input[str]] = None, ssl_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ssl_policy: Optional[pulumi.Input[str]] = None, url_map: Optional[pulumi.Input[str]] = None, __props__=None): """ Represents a TargetHttpsProxy resource, which is used by one or more global forwarding rule to route incoming HTTPS requests to a URL map. 
To get more information about TargetHttpsProxy, see: * [API documentation](https://cloud.google.com/compute/docs/reference/v1/targetHttpsProxies) * How-to Guides * [Official Documentation](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) ## Example Usage ### Target Https Proxy Basic ```python import pulumi import pulumi_gcp as gcp default_ssl_certificate = gcp.compute.SSLCertificate("defaultSSLCertificate", private_key=(lambda path: open(path).read())("path/to/private.key"), certificate=(lambda path: open(path).read())("path/to/certificate.crt")) default_http_health_check = gcp.compute.HttpHealthCheck("defaultHttpHealthCheck", request_path="/", check_interval_sec=1, timeout_sec=1) default_backend_service = gcp.compute.BackendService("defaultBackendService", port_name="http", protocol="HTTP", timeout_sec=10, health_checks=[default_http_health_check.id]) default_url_map = gcp.compute.URLMap("defaultURLMap", description="a description", default_service=default_backend_service.id, host_rules=[gcp.compute.URLMapHostRuleArgs( hosts=["mysite.com"], path_matcher="allpaths", )], path_matchers=[gcp.compute.URLMapPathMatcherArgs( name="allpaths", default_service=default_backend_service.id, path_rules=[gcp.compute.URLMapPathMatcherPathRuleArgs( paths=["/*"], service=default_backend_service.id, )], )]) default_target_https_proxy = gcp.compute.TargetHttpsProxy("defaultTargetHttpsProxy", url_map=default_url_map.id, ssl_certificates=[default_ssl_certificate.id]) ``` ## Import TargetHttpsProxy can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default projects/{{project}}/global/targetHttpsProxies/{{name}} ``` ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default {{project}}/{{name}} ``` ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[bool] proxy_bind: This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[str] quic_override: Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. 
:param pulumi.Input[str] ssl_policy: A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. :param pulumi.Input[str] url_map: A reference to the UrlMap resource that defines the mapping from URL to the BackendService. """ ... @overload def __init__(__self__, resource_name: str, args: TargetHttpsProxyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Represents a TargetHttpsProxy resource, which is used by one or more global forwarding rule to route incoming HTTPS requests to a URL map. To get more information about TargetHttpsProxy, see: * [API documentation](https://cloud.google.com/compute/docs/reference/v1/targetHttpsProxies) * How-to Guides * [Official Documentation](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) ## Example Usage ### Target Https Proxy Basic ```python import pulumi import pulumi_gcp as gcp default_ssl_certificate = gcp.compute.SSLCertificate("defaultSSLCertificate", private_key=(lambda path: open(path).read())("path/to/private.key"), certificate=(lambda path: open(path).read())("path/to/certificate.crt")) default_http_health_check = gcp.compute.HttpHealthCheck("defaultHttpHealthCheck", request_path="/", check_interval_sec=1, timeout_sec=1) default_backend_service = gcp.compute.BackendService("defaultBackendService", port_name="http", protocol="HTTP", timeout_sec=10, health_checks=[default_http_health_check.id]) default_url_map = gcp.compute.URLMap("defaultURLMap", description="a description", default_service=default_backend_service.id, host_rules=[gcp.compute.URLMapHostRuleArgs( hosts=["mysite.com"], path_matcher="allpaths", )], path_matchers=[gcp.compute.URLMapPathMatcherArgs( name="allpaths", default_service=default_backend_service.id, path_rules=[gcp.compute.URLMapPathMatcherPathRuleArgs( paths=["/*"], service=default_backend_service.id, )], )]) default_target_https_proxy = gcp.compute.TargetHttpsProxy("defaultTargetHttpsProxy", url_map=default_url_map.id, ssl_certificates=[default_ssl_certificate.id]) ``` ## Import TargetHttpsProxy can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default projects/{{project}}/global/targetHttpsProxies/{{name}} ``` ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default {{project}}/{{name}} ``` ```sh $ pulumi import gcp:compute/targetHttpsProxy:TargetHttpsProxy default {{name}} ``` :param str resource_name: The name of the resource. :param TargetHttpsProxyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TargetHttpsProxyArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, proxy_bind: Optional[pulumi.Input[bool]] = None, quic_override: Optional[pulumi.Input[str]] = None, ssl_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ssl_policy: Optional[pulumi.Input[str]] = None, url_map: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TargetHttpsProxyArgs.__new__(TargetHttpsProxyArgs) __props__.__dict__["description"] = description __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["proxy_bind"] = proxy_bind __props__.__dict__["quic_override"] = quic_override if ssl_certificates is None and not opts.urn: raise TypeError("Missing required property 'ssl_certificates'") __props__.__dict__["ssl_certificates"] = ssl_certificates __props__.__dict__["ssl_policy"] = ssl_policy if url_map is None and not opts.urn: raise TypeError("Missing required property 'url_map'") __props__.__dict__["url_map"] = url_map __props__.__dict__["creation_timestamp"] = None __props__.__dict__["proxy_id"] = None __props__.__dict__["self_link"] = None super(TargetHttpsProxy, __self__).__init__( 'gcp:compute/targetHttpsProxy:TargetHttpsProxy', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, proxy_bind: Optional[pulumi.Input[bool]] = None, proxy_id: Optional[pulumi.Input[int]] = None, quic_override: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, ssl_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ssl_policy: Optional[pulumi.Input[str]] = None, url_map: Optional[pulumi.Input[str]] = None) -> 'TargetHttpsProxy': """ Get an existing TargetHttpsProxy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[bool] proxy_bind: This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[int] proxy_id: The unique identifier for the resource. :param pulumi.Input[str] quic_override: Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. :param pulumi.Input[str] ssl_policy: A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. :param pulumi.Input[str] url_map: A reference to the UrlMap resource that defines the mapping from URL to the BackendService. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TargetHttpsProxyState.__new__(_TargetHttpsProxyState) __props__.__dict__["creation_timestamp"] = creation_timestamp __props__.__dict__["description"] = description __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["proxy_bind"] = proxy_bind __props__.__dict__["proxy_id"] = proxy_id __props__.__dict__["quic_override"] = quic_override __props__.__dict__["self_link"] = self_link __props__.__dict__["ssl_certificates"] = ssl_certificates __props__.__dict__["ssl_policy"] = ssl_policy __props__.__dict__["url_map"] = url_map return TargetHttpsProxy(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> pulumi.Output[str]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
""" return pulumi.get(self, "project") @property @pulumi.getter(name="proxyBind") def proxy_bind(self) -> pulumi.Output[bool]: """ This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "proxy_bind") @property @pulumi.getter(name="proxyId") def proxy_id(self) -> pulumi.Output[int]: """ The unique identifier for the resource. """ return pulumi.get(self, "proxy_id") @property @pulumi.getter(name="quicOverride") def quic_override(self) -> pulumi.Output[Optional[str]]: """ Specifies the QUIC override policy for this resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Default value is `NONE`. Possible values are `NONE`, `ENABLE`, and `DISABLE`. """ return pulumi.get(self, "quic_override") @property @pulumi.getter(name="selfLink") def self_link(self) -> pulumi.Output[str]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @property @pulumi.getter(name="sslCertificates") def ssl_certificates(self) -> pulumi.Output[Sequence[str]]: """ A list of SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. """ return pulumi.get(self, "ssl_certificates") @property @pulumi.getter(name="sslPolicy") def ssl_policy(self) -> pulumi.Output[Optional[str]]: """ A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. """ return pulumi.get(self, "ssl_policy") @property @pulumi.getter(name="urlMap") def url_map(self) -> pulumi.Output[str]: """ A reference to the UrlMap resource that defines the mapping from URL to the BackendService. """ return pulumi.get(self, "url_map")
45.886585
139
0.643554
4,456
37,627
5.276481
0.067998
0.065031
0.062521
0.055206
0.912385
0.894012
0.879126
0.86777
0.864835
0.836977
0
0.003527
0.268982
37,627
819
140
45.942613
0.851269
0.490286
0
0.72807
1
0
0.090819
0.002795
0
0
0
0
0
1
0.163743
false
0.002924
0.01462
0
0.277778
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
78225f57af8a4a73caf486c4d8a34af27fcbf919
1,383
py
Python
Server/tests/__init__.py
moreal/DMS-Backend
c0a3b28972739c58049a296570bb873f32c03eec
[ "MIT" ]
27
2018-01-14T08:07:18.000Z
2020-01-20T14:21:17.000Z
Server/tests/__init__.py
moreal/DMS-Backend
c0a3b28972739c58049a296570bb873f32c03eec
[ "MIT" ]
50
2018-02-12T12:51:33.000Z
2018-08-28T00:48:31.000Z
Server/tests/__init__.py
moreal/DMS-Backend
c0a3b28972739c58049a296570bb873f32c03eec
[ "MIT" ]
10
2018-03-31T16:30:32.000Z
2021-03-02T10:30:31.000Z
from .v2.views.admin.account.t_account_management import *
from .v2.views.admin.account.t_auth import *
from .v2.views.admin.excel.t_extension import *
from .v2.views.admin.excel.t_goingout import *
from .v2.views.admin.excel.t_stay import *
from .v2.views.admin.point.t_point import *
from .v2.views.admin.point.t_rule import *
from .v2.views.admin.point.t_student import *
from .v2.views.admin.post.t_post import *
from .v2.views.admin.post.t_preview import *
from .v2.views.admin.report.t_facility import *

# ---

from .v2.views.mixed.jwt.t_checker import *
from .v2.views.mixed.jwt.t_refresh import *
from .v2.views.mixed.metadata.t_developers import *
from .v2.views.mixed.metadata.t_links import *
from .v2.views.mixed.metadata.t_version import *
from .v2.views.mixed.school_data.t_meal import *

# ---

from .v2.views.student.account.t_alteration import *
from .v2.views.student.account.t_auth import *
from .v2.views.student.account.t_info import *
from .v2.views.student.account.t_signup import *
from .v2.views.student.apply.t_extension.post import *
from .v2.views.student.apply.t_extension.get import *
from .v2.views.student.apply.t_extension.delete import *
from .v2.views.student.apply.t_goingout import *
from .v2.views.student.apply.t_stay import *
from .v2.views.student.report.t_bug_report import *
from .v2.views.student.report.t_facility_report import *
32.928571
58
0.778742
231
1,383
4.52381
0.164502
0.160766
0.294737
0.439234
0.83445
0.778947
0.717703
0.111962
0
0
0
0.022293
0.091829
1,383
42
59
32.928571
0.809713
0.005061
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
782db76eab2d43a3043d2df9fe3b6e737f5a5807
151
py
Python
python-day3/loginapi/app/resources/__init__.py
sunilvijendra/python-flask-course
be49492098df05f28091c71e7eeecb706409a967
[ "MIT" ]
null
null
null
python-day3/loginapi/app/resources/__init__.py
sunilvijendra/python-flask-course
be49492098df05f28091c71e7eeecb706409a967
[ "MIT" ]
null
null
null
python-day3/loginapi/app/resources/__init__.py
sunilvijendra/python-flask-course
be49492098df05f28091c71e7eeecb706409a967
[ "MIT" ]
1
2018-07-16T07:01:15.000Z
2018-07-16T07:01:15.000Z
from loginapi.app import api
from . import Users

#api.add_resource(User, '/user')
api.add_resource(Users, '/users')
#api.add_resource(Login, '/login')
25.166667
34
0.741722
23
151
4.73913
0.434783
0.165138
0.385321
0.348624
0
0
0
0
0
0
0
0
0.092715
151
6
34
25.166667
0.79562
0.423841
0
0
0
0
0.069767
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
78793b46aa218c718f1838bcb93b8ab8978579ca
186
py
Python
visualize/__init__.py
lamnhh/AI-assignment-1
cf4cd66afb4726b64f37110b358ae3483379c895
[ "MIT" ]
null
null
null
visualize/__init__.py
lamnhh/AI-assignment-1
cf4cd66afb4726b64f37110b358ae3483379c895
[ "MIT" ]
null
null
null
visualize/__init__.py
lamnhh/AI-assignment-1
cf4cd66afb4726b64f37110b358ae3483379c895
[ "MIT" ]
null
null
null
from visualize.visualize2d import visualize as visualize2d
from visualize.visualize3d import visualize as visualize3d
from visualize.input_visualiser import visualize as visualize_input
46.5
67
0.887097
23
186
7.086957
0.347826
0.239264
0.312883
0
0
0
0
0
0
0
0
0.02381
0.096774
186
3
68
62
0.946429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
788644fe9c8256333a0f31c5365d936cc9d076b6
3,035
py
Python
primer.py
chrisalopez1337/python-algos
560912b5b34f3fe7286e7ecff48097da766e11e1
[ "MIT" ]
null
null
null
primer.py
chrisalopez1337/python-algos
560912b5b34f3fe7286e7ecff48097da766e11e1
[ "MIT" ]
null
null
null
primer.py
chrisalopez1337/python-algos
560912b5b34f3fe7286e7ecff48097da766e11e1
[ "MIT" ]
null
null
null
# Some of the old interview questions for primer.
import json

# Note: the original JSON literal was missing its final closing brace, so
# json.loads() would raise a JSONDecodeError; the brace is restored here.
docid_to_title = json.loads('{"28099931907": {"content": {"title": "Zika dollars and Internet fight in play as government shutdown looms"}}, "28108959337": {"content": {"title": "Leaders express renewed optimism about flood relief as Congress aims to avoid a government shutdown"}}, "28110814722": {"content": {"title": "Senate passes stopgap spending bill, $1.1B to fight Zika"}}, "28101422554": {"content": {"title": "Senate blocks bill to avert shutdown as De..."}}, "28099132063": {"content": {"title": "Senate Dems threaten shutdown over Flint aid"}}, "28101167069": {"content": {"title": "Zika dollars, internet fight in play as government shutdown looms"}}, "28102817621": {"content": {"title": "House leaders reach deal on Flint aid, potentially averting shutdown"}}, "28108039075": {"content": {"title": "Deal reached to keep US government running, help Flint"}}, "28098698025": {"content": {"title": "Defeat of Republicans\\u2019 stop-gap spending bill brings federal government closer to a shutdown"}}, "28097027782": {"content": {"title": "Will Democrats force a government shutdown over Flint relief funds?"}}, "28111131355": {"content": {"title": "Lawmakers strike deal to avoid shutdown"}}, "28110856451": {"content": {"title": "Louisiana flood aid, $500 million, survives Senate budget fight"}}, "28106625838": {"content": {"title": "Pelosi aide says deal with Speaker Ryan covers Flint aid"}}, "28099931906": {"content": {"title": "Zika dollars and Internet fight in play as government shutdown looms"}}, "28096859458": {"content": {"title": "Are Democrats pushing the federal government toward a shutdown?"}}, "28110814723": {"content": {"title": "Senate passes stopgap spending bill, $1.1B to fight Zika"}}, "28093906031": {"content": {"title": "will democrats force a federal shutdown over Flint relief funds?"}}, "28108873175": {"content": {"title": "Senate Democrats accept deal on Flint aid, potentially averting shutdown", "sources": ["ABC", "FOX"]}}, "28103573587": {"content": {"title": "ALEXANDER : Senate Democrats\\u2019 Election Politics Are Putting Babies at Risk"}}, "28096849799": {"content": {"title": "Democrats poised to block stopgap funding bill over Flint"}}, "28098943696": {"content": {"title": "Senate blocks stopgap bill to prevent shutdown this weekend"}}, "28110942840": {"content": {"title": "Senate Passes Short-Term Government Funding Measure"}}, "28107038234": {"content": {"title": "House aides: Deal reached to help Flint, keep US gov\'t open"}}, "28084509076": {"content": {"title": "Week ahead: Spending fight shifts from Zika to Flint"}}, "28089633072": {"content": {"title": "Flint water aid spending bill\'s sticking point"}}, "28099749478": {"content": {"title": "Government shutdown looms due to partisan clash over Flint aid"}}, "28099468922": {"content": {"title": "Funding bill rejected as shutdown nears"}}, "28107540657": {"content": null}, "28098940419": {"content": {"title": ""}}}')
print(docid_to_title)
505.833333
2,948
0.712356
370
3,035
5.832432
0.432432
0.1557
0.058387
0.031974
0.230769
0.204819
0.176089
0.138091
0.117702
0.117702
0
0.12486
0.118616
3,035
5
2,949
607
0.681869
0.015486
0
0
0
0.666667
0.925653
0
0
0
0
0
0
1
0
false
0.333333
0.333333
0
0.333333
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
7
78a5c01c29b68de4286a88d999a1dd1592f69ee2
225
py
Python
redant_libs/framework_mixin.py
srijan-sivakumar/dummy_redant
36f30548331d67f38975e22efbb4cae953faaea3
[ "MIT" ]
null
null
null
redant_libs/framework_mixin.py
srijan-sivakumar/dummy_redant
36f30548331d67f38975e22efbb4cae953faaea3
[ "MIT" ]
null
null
null
redant_libs/framework_mixin.py
srijan-sivakumar/dummy_redant
36f30548331d67f38975e22efbb4cae953faaea3
[ "MIT" ]
null
null
null
from redant_libs.ops_libs.peer_ops import peer_ops
from redant_libs.support_libs.rexe import Remote_ops
from redant_libs.ops_libs.volume_ops import volume_ops

class frameworkMixin(Remote_ops, peer_ops, volume_ops):
    pass
32.142857
55
0.853333
38
225
4.684211
0.342105
0.168539
0.235955
0.191011
0.235955
0
0
0
0
0
0
0
0.097778
225
6
56
37.5
0.876847
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.6
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
7
1532932461b530afa3acc56936b9b50794ae7f94
4,634
py
Python
python/test_alpenglow/evaluation/test_DcgScore.py
fbobee/Alpenglow
5f956511017c1bee72390aaecd964c04d8ad4b45
[ "Apache-2.0" ]
28
2017-07-23T22:47:44.000Z
2022-03-12T15:11:13.000Z
python/test_alpenglow/evaluation/test_DcgScore.py
fbobee/Alpenglow
5f956511017c1bee72390aaecd964c04d8ad4b45
[ "Apache-2.0" ]
4
2017-05-10T10:23:17.000Z
2019-05-23T14:07:09.000Z
python/test_alpenglow/evaluation/test_DcgScore.py
fbobee/Alpenglow
5f956511017c1bee72390aaecd964c04d8ad4b45
[ "Apache-2.0" ]
9
2017-05-04T09:20:58.000Z
2021-12-14T08:19:01.000Z
import alpenglow as prs import alpenglow.Getter as rs import alpenglow.evaluation import pandas as pd import math import unittest class TestDcgScore(unittest.TestCase): def test_dcgScore(self): ranks = [102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 65, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 18, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 67, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 58, 100, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 102, 100, 100, 100, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 47, 102, 102, 102, 100, 100, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 100, 102, 54, 100, 102, 100, 100, 102, 102, 100, 100, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 100, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 100, 102, 100, 100, 47, 102, 100, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 100, 100, 100, 102, 100, 102, 102, 100, 102, 102, 100, 100, 37, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 55, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 22, 102, 100, 100, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 100, 100, 102, 100, 102, 102, 100, 100, 100, 102, 102, 100, 100, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 21, 100, 102, 100, 102, 100, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 100, 100, 102, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 100, 102, 100, 102, 100, 100, 102, 100, 102, 100, 100, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 100, 102, 102, 102, 100, 102, 100, 102, 102, 102, 100, 102, 102, 102, 102, 102, 100, 102, 100, 100, 100, 102, 100, 102, 100, 102, 102, 100, 100, 100, 100, 
100, 100, 102, 88, 102, 102, 102, 100, 102, 100, 100, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 100, 100, 100, 100, 102, 100, 100, 102, 102, 100, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 14, 102, 100, 102, 102, 102, 102, 100, 102, 100, 100, 100, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 100, 100, 100, 102, 102, 102, 102, 102, 102, 100, 102, 100, 100, 102, 100, 100, 102, 102, 100, 100, 100, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 100, 100, 102, 102, 100, 102, 100, 102, 102, 100] facRankings = pd.DataFrame.from_records( [ (i, i, 0, r + 1 if r < 100 else None) for i, r in enumerate(ranks) ], columns=["id", "time", "prediction", "rank"] ).set_index("id") facRankings.top_k = 100 dcg = alpenglow.evaluation.DcgScore(facRankings).mean() dcgs = [math.log(2) / math.log(r + 2) if r < 100 else 0 for r in ranks] self.assertAlmostEqual(dcg, sum(dcgs) / len(dcgs))
201.478261
3,938
0.598403
879
4,634
3.150171
0.07281
0.953413
1.105092
1.191766
0.83857
0.83857
0.836403
0.83532
0.814735
0.782232
0
0.649079
0.214717
4,634
22
3,939
210.636364
0.111844
0
0
0
0
0
0.004748
0
0
0
0
0
0.05
1
0.05
false
0
0.3
0
0.4
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
13
1549cc4761b55f7cb9c93e9077d3ecfc88b40f7c
189
py
Python
extensions/lrc_editor/__init__.py
Tang142857/MyEditor
2d532eecfa6c48719cf6db99495a910ddd0ff52c
[ "MulanPSL-1.0" ]
null
null
null
extensions/lrc_editor/__init__.py
Tang142857/MyEditor
2d532eecfa6c48719cf6db99495a910ddd0ff52c
[ "MulanPSL-1.0" ]
null
null
null
extensions/lrc_editor/__init__.py
Tang142857/MyEditor
2d532eecfa6c48719cf6db99495a910ddd0ff52c
[ "MulanPSL-1.0" ]
null
null
null
""" lyrics file editor for ME @author: Tang142857 @file: __init__.py ,Create at: 2021-02-15 Copyright(c): DFSA Software Develop Center """ print(f'Importing lrc_extension at {__file__}')
18.9
47
0.740741
28
189
4.678571
0.892857
0
0
0
0
0
0
0
0
0
0
0.085366
0.132275
189
9
48
21
0.713415
0.693122
0
0
0
0
0.74
0
0
0
0
0
0
1
0
true
0
1
0
1
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
7
ece79794dc55d45c8a4e4410efd61b785e56f63a
177
py
Python
knx_stack/decode/layer/network/__init__.py
majamassarini/knx-stack
11a9baac6b7600649b5fbca43c93b200b23676b4
[ "MIT" ]
2
2021-07-28T07:42:28.000Z
2022-01-25T18:56:05.000Z
knx_stack/decode/layer/network/__init__.py
majamassarini/knx-stack
11a9baac6b7600649b5fbca43c93b200b23676b4
[ "MIT" ]
6
2021-07-25T21:36:01.000Z
2022-02-20T21:11:31.000Z
knx_stack/decode/layer/network/__init__.py
majamassarini/knx-stack
11a9baac6b7600649b5fbca43c93b200b23676b4
[ "MIT" ]
null
null
null
from knx_stack.decode.layer.network import n_data_broadcast
from knx_stack.decode.layer.network import n_data_group
from knx_stack.decode.layer.network import n_data_individual
44.25
60
0.881356
30
177
4.9
0.4
0.142857
0.244898
0.367347
0.836735
0.836735
0.836735
0.836735
0.836735
0
0
0
0.067797
177
3
61
59
0.890909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
11
01d90c21b4cacb5ed3ce2a0cc9107bb62085e09e
14,456
py
Python
computational-harmony/majorMajorNetwork.py
TylerMclaughlin/computational-harmony
3a3a10aa46738f6761fa3df19df62c70a66afdf2
[ "MIT" ]
5
2019-06-04T00:22:15.000Z
2021-05-06T17:48:39.000Z
computational-harmony/majorMajorNetwork.py
TylerMclaughlin/computational-harmony
3a3a10aa46738f6761fa3df19df62c70a66afdf2
[ "MIT" ]
null
null
null
computational-harmony/majorMajorNetwork.py
TylerMclaughlin/computational-harmony
3a3a10aa46738f6761fa3df19df62c70a66afdf2
[ "MIT" ]
1
2017-08-10T18:12:11.000Z
2017-08-10T18:12:11.000Z
[('C', 'Major'), ('C sharp', 'Major'), [0, 5], 2] [('C sharp', 'Major'), ('G', 'Major'), [0, 6], 2] [('G', 'Major'), ('C sharp', 'Major'), [0, 6], 2] [('C sharp', 'Major'), ('C', 'Major'), [0, 5], 2] [('C', 'Major'), ('B', 'Major'), [11, 4], 2] [('B', 'Major'), ('B flat', 'Major'), [10, 3], 2] [('B flat', 'Major'), ('A', 'Major'), [9, 2], 2] [('B flat', 'Major'), ('B', 'Major'), [10, 3], 2] [('B flat', 'Major'), ('E', 'Major'), [9, 3], 2] [('B flat', 'Major'), ('F sharp', 'Major'), [10, 3, 5], 3] [('B flat', 'Major'), ('D', 'Major'), [9, 2, 7], 3] [('B flat', 'Major'), ('C sharp', 'Major'), [0, 10, 3, 5], 4] [('B flat', 'Major'), ('G', 'Major'), [0, 9, 2, 7], 4] [('B flat', 'Major'), ('G sharp', 'Major'), [0, 10, 3, 5, 7], 5] [('B flat', 'Major'), ('C', 'Major'), [0, 9, 2, 5, 7], 5] [('B flat', 'Major'), ('E flat', 'Major'), [0, 2, 3, 5, 7, 10], 6] [('B flat', 'Major'), ('F', 'Major'), [0, 2, 5, 7, 9, 10], 6] [('B', 'Major'), ('C', 'Major'), [11, 4], 2] [('C', 'Major'), ('F sharp', 'Major'), [11, 5], 2] [('C', 'Major'), ('G sharp', 'Major'), [0, 5, 7], 3] [('C', 'Major'), ('E', 'Major'), [9, 11, 4], 3] [('C', 'Major'), ('A', 'Major'), [9, 2, 11, 4], 4] [('C', 'Major'), ('E flat', 'Major'), [0, 2, 5, 7], 4] [('C', 'Major'), ('B flat', 'Major'), [0, 9, 2, 5, 7], 5] [('C', 'Major'), ('D', 'Major'), [9, 2, 11, 4, 7], 5] [('C', 'Major'), ('G', 'Major'), [0, 2, 4, 7, 9, 11], 6] [('C', 'Major'), ('F', 'Major'), [0, 2, 4, 5, 7, 9], 6] [('B', 'Major'), ('F', 'Major'), [10, 4], 2] [('F', 'Major'), ('B', 'Major'), [10, 4], 2] [('F', 'Major'), ('F sharp', 'Major'), [10, 5], 2] [('F', 'Major'), ('E', 'Major'), [9, 4], 2] [('F', 'Major'), ('A', 'Major'), [9, 2, 4], 3] [('F', 'Major'), ('C sharp', 'Major'), [0, 10, 5], 3] [('F', 'Major'), ('G sharp', 'Major'), [0, 10, 5, 7], 4] [('F', 'Major'), ('D', 'Major'), [9, 2, 4, 7], 4] [('F', 'Major'), ('G', 'Major'), [0, 9, 2, 4, 7], 5] [('F', 'Major'), ('E flat', 'Major'), [0, 2, 10, 5, 7], 5] [('F', 'Major'), ('B flat', 'Major'), [0, 2, 5, 7, 9, 10], 6] [('F', 'Major'), ('C', 'Major'), [0, 2, 4, 5, 7, 9], 6] [('B', 'Major'), ('G', 'Major'), [11, 4, 6], 3] [('G', 'Major'), ('G sharp', 'Major'), [0, 7], 2] [('G', 'Major'), ('F sharp', 'Major'), [11, 6], 2] [('G', 'Major'), ('B', 'Major'), [11, 4, 6], 3] [('G', 'Major'), ('E flat', 'Major'), [0, 2, 7], 3] [('G', 'Major'), ('B flat', 'Major'), [0, 9, 2, 7], 4] [('G', 'Major'), ('E', 'Major'), [9, 11, 4, 6], 4] [('G', 'Major'), ('A', 'Major'), [9, 2, 11, 4, 6], 5] [('G', 'Major'), ('F', 'Major'), [0, 9, 2, 4, 7], 5] [('G', 'Major'), ('C', 'Major'), [0, 2, 4, 7, 9, 11], 6] [('G', 'Major'), ('D', 'Major'), [2, 4, 6, 7, 9, 11], 6] [('B', 'Major'), ('E flat', 'Major'), [8, 10, 3], 3] [('E flat', 'Major'), ('A', 'Major'), [8, 2], 2] [('E flat', 'Major'), ('E', 'Major'), [8, 3], 2] [('E flat', 'Major'), ('D', 'Major'), [2, 7], 2] [('E flat', 'Major'), ('B', 'Major'), [8, 10, 3], 3] [('E flat', 'Major'), ('G', 'Major'), [0, 2, 7], 3] [('E flat', 'Major'), ('F sharp', 'Major'), [8, 10, 3, 5], 4] [('E flat', 'Major'), ('C', 'Major'), [0, 2, 5, 7], 4] [('E flat', 'Major'), ('C sharp', 'Major'), [0, 8, 10, 3, 5], 5] [('E flat', 'Major'), ('F', 'Major'), [0, 2, 10, 5, 7], 5] [('E flat', 'Major'), ('B flat', 'Major'), [0, 2, 3, 5, 7, 10], 6] [('E flat', 'Major'), ('G sharp', 'Major'), [0, 3, 5, 7, 8, 10], 6] [('B', 'Major'), ('G sharp', 'Major'), [8, 1, 10, 3], 4] [('G sharp', 'Major'), ('A', 'Major'), [8, 1], 2] [('G sharp', 'Major'), ('G', 'Major'), [0, 7], 2] [('G sharp', 'Major'), ('D', 'Major'), [1, 7], 2] [('G sharp', 
'Major'), ('C', 'Major'), [0, 5, 7], 3] [('G sharp', 'Major'), ('E', 'Major'), [8, 1, 3], 3] [('G sharp', 'Major'), ('B', 'Major'), [8, 1, 10, 3], 4] [('G sharp', 'Major'), ('F', 'Major'), [0, 10, 5, 7], 4] [('G sharp', 'Major'), ('B flat', 'Major'), [0, 10, 3, 5, 7], 5] [('G sharp', 'Major'), ('F sharp', 'Major'), [8, 1, 10, 3, 5], 5] [('G sharp', 'Major'), ('C sharp', 'Major'), [0, 1, 3, 5, 8, 10], 6] [('G sharp', 'Major'), ('E flat', 'Major'), [0, 3, 5, 7, 8, 10], 6] [('B', 'Major'), ('D', 'Major'), [1, 11, 4, 6], 4] [('D', 'Major'), ('C sharp', 'Major'), [1, 6], 2] [('D', 'Major'), ('G sharp', 'Major'), [1, 7], 2] [('D', 'Major'), ('E flat', 'Major'), [2, 7], 2] [('D', 'Major'), ('B flat', 'Major'), [9, 2, 7], 3] [('D', 'Major'), ('F sharp', 'Major'), [1, 11, 6], 3] [('D', 'Major'), ('B', 'Major'), [1, 11, 4, 6], 4] [('D', 'Major'), ('F', 'Major'), [9, 2, 4, 7], 4] [('D', 'Major'), ('C', 'Major'), [9, 2, 11, 4, 7], 5] [('D', 'Major'), ('E', 'Major'), [1, 11, 4, 6, 9], 5] [('D', 'Major'), ('A', 'Major'), [1, 2, 4, 6, 9, 11], 6] [('D', 'Major'), ('G', 'Major'), [2, 4, 6, 7, 9, 11], 6] [('B', 'Major'), ('A', 'Major'), [8, 1, 11, 4, 6], 5] [('A', 'Major'), ('B flat', 'Major'), [9, 2], 2] [('A', 'Major'), ('G sharp', 'Major'), [8, 1], 2] [('A', 'Major'), ('E flat', 'Major'), [8, 2], 2] [('A', 'Major'), ('C sharp', 'Major'), [8, 1, 6], 3] [('A', 'Major'), ('F', 'Major'), [9, 2, 4], 3] [('A', 'Major'), ('F sharp', 'Major'), [8, 1, 11, 6], 4] [('A', 'Major'), ('C', 'Major'), [9, 2, 11, 4], 4] [('A', 'Major'), ('B', 'Major'), [8, 1, 11, 4, 6], 5] [('A', 'Major'), ('G', 'Major'), [9, 2, 11, 4, 6], 5] [('A', 'Major'), ('E', 'Major'), [1, 4, 6, 8, 9, 11], 6] [('A', 'Major'), ('D', 'Major'), [1, 2, 4, 6, 9, 11], 6] [('B', 'Major'), ('C sharp', 'Major'), [8, 1, 10, 3, 6], 5] [('C sharp', 'Major'), ('D', 'Major'), [1, 6], 2] [('C sharp', 'Major'), ('A', 'Major'), [8, 1, 6], 3] [('C sharp', 'Major'), ('F', 'Major'), [0, 10, 5], 3] [('C sharp', 'Major'), ('B flat', 'Major'), [0, 10, 3, 5], 4] [('C sharp', 'Major'), ('E', 'Major'), [8, 1, 3, 6], 4] [('C sharp', 'Major'), ('B', 'Major'), [8, 1, 10, 3, 6], 5] [('C sharp', 'Major'), ('E flat', 'Major'), [0, 8, 10, 3, 5], 5] [('C sharp', 'Major'), ('G sharp', 'Major'), [0, 1, 3, 5, 8, 10], 6] [('C sharp', 'Major'), ('F sharp', 'Major'), [1, 3, 5, 6, 8, 10], 6] [('B', 'Major'), ('F sharp', 'Major'), [1, 3, 6, 8, 10, 11], 6] [('F sharp', 'Major'), ('G', 'Major'), [11, 6], 2] [('F sharp', 'Major'), ('C', 'Major'), [11, 5], 2] [('F sharp', 'Major'), ('F', 'Major'), [10, 5], 2] [('F sharp', 'Major'), ('B flat', 'Major'), [10, 3, 5], 3] [('F sharp', 'Major'), ('D', 'Major'), [1, 11, 6], 3] [('F sharp', 'Major'), ('A', 'Major'), [8, 1, 11, 6], 4] [('F sharp', 'Major'), ('E flat', 'Major'), [8, 10, 3, 5], 4] [('F sharp', 'Major'), ('G sharp', 'Major'), [8, 1, 10, 3, 5], 5] [('F sharp', 'Major'), ('E', 'Major'), [8, 1, 3, 6, 11], 5] [('F sharp', 'Major'), ('C sharp', 'Major'), [1, 3, 5, 6, 8, 10], 6] [('F sharp', 'Major'), ('B', 'Major'), [1, 3, 6, 8, 10, 11], 6] [('B', 'Major'), ('E', 'Major'), [1, 3, 4, 6, 8, 11], 6] [('E', 'Major'), ('B flat', 'Major'), [9, 3], 2] [('E', 'Major'), ('E flat', 'Major'), [8, 3], 2] [('E', 'Major'), ('F', 'Major'), [9, 4], 2] [('E', 'Major'), ('G sharp', 'Major'), [8, 1, 3], 3] [('E', 'Major'), ('C', 'Major'), [9, 11, 4], 3] [('E', 'Major'), ('C sharp', 'Major'), [8, 1, 3, 6], 4] [('E', 'Major'), ('G', 'Major'), [9, 11, 4, 6], 4] [('E', 'Major'), ('F sharp', 'Major'), [8, 1, 3, 6, 11], 5] [('E', 'Major'), ('D', 'Major'), [1, 11, 4, 
6, 9], 5] [('E', 'Major'), ('A', 'Major'), [1, 4, 6, 8, 9, 11], 6] [('E', 'Major'), ('B', 'Major'), [1, 3, 4, 6, 8, 11], 6] [('C', 'Major'), ('C sharp', 'Major'), [0, 5], 2] [('C sharp', 'Major'), ('G', 'Major'), [0, 6], 2] [('G', 'Major'), ('C sharp', 'Major'), [0, 6], 2] [('C sharp', 'Major'), ('C', 'Major'), [0, 5], 2] [('C', 'Major'), ('B', 'Major'), [11, 4], 2] [('B', 'Major'), ('B flat', 'Major'), [10, 3], 2] [('B flat', 'Major'), ('A', 'Major'), [9, 2], 2] [('A', 'Major'), ('B flat', 'Major'), [9, 2], 2] [('B flat', 'Major'), ('B', 'Major'), [10, 3], 2] [('B', 'Major'), ('C', 'Major'), [11, 4], 2] [('C', 'Major'), ('F sharp', 'Major'), [11, 5], 2] [('F sharp', 'Major'), ('G', 'Major'), [11, 6], 2] [('F sharp', 'Major'), ('C', 'Major'), [11, 5], 2] [('F sharp', 'Major'), ('F', 'Major'), [10, 5], 2] [('F sharp', 'Major'), ('B flat', 'Major'), [10, 3, 5], 3] [('F sharp', 'Major'), ('D', 'Major'), [1, 11, 6], 3] [('F sharp', 'Major'), ('A', 'Major'), [8, 1, 11, 6], 4] [('F sharp', 'Major'), ('E flat', 'Major'), [8, 10, 3, 5], 4] [('F sharp', 'Major'), ('G sharp', 'Major'), [8, 1, 10, 3, 5], 5] [('F sharp', 'Major'), ('E', 'Major'), [8, 1, 3, 6, 11], 5] [('F sharp', 'Major'), ('C sharp', 'Major'), [1, 3, 5, 6, 8, 10], 6] [('F sharp', 'Major'), ('B', 'Major'), [1, 3, 6, 8, 10, 11], 6] [('C', 'Major'), ('G sharp', 'Major'), [0, 5, 7], 3] [('G sharp', 'Major'), ('A', 'Major'), [8, 1], 2] [('G sharp', 'Major'), ('G', 'Major'), [0, 7], 2] [('G sharp', 'Major'), ('D', 'Major'), [1, 7], 2] [('G sharp', 'Major'), ('C', 'Major'), [0, 5, 7], 3] [('G sharp', 'Major'), ('E', 'Major'), [8, 1, 3], 3] [('G sharp', 'Major'), ('B', 'Major'), [8, 1, 10, 3], 4] [('G sharp', 'Major'), ('F', 'Major'), [0, 10, 5, 7], 4] [('G sharp', 'Major'), ('B flat', 'Major'), [0, 10, 3, 5, 7], 5] [('G sharp', 'Major'), ('F sharp', 'Major'), [8, 1, 10, 3, 5], 5] [('G sharp', 'Major'), ('C sharp', 'Major'), [0, 1, 3, 5, 8, 10], 6] [('G sharp', 'Major'), ('E flat', 'Major'), [0, 3, 5, 7, 8, 10], 6] [('C', 'Major'), ('E', 'Major'), [9, 11, 4], 3] [('E', 'Major'), ('B flat', 'Major'), [9, 3], 2] [('E', 'Major'), ('E flat', 'Major'), [8, 3], 2] [('E', 'Major'), ('F', 'Major'), [9, 4], 2] [('E', 'Major'), ('G sharp', 'Major'), [8, 1, 3], 3] [('E', 'Major'), ('C', 'Major'), [9, 11, 4], 3] [('E', 'Major'), ('C sharp', 'Major'), [8, 1, 3, 6], 4] [('E', 'Major'), ('G', 'Major'), [9, 11, 4, 6], 4] [('E', 'Major'), ('F sharp', 'Major'), [8, 1, 3, 6, 11], 5] [('E', 'Major'), ('D', 'Major'), [1, 11, 4, 6, 9], 5] [('E', 'Major'), ('A', 'Major'), [1, 4, 6, 8, 9, 11], 6] [('E', 'Major'), ('B', 'Major'), [1, 3, 4, 6, 8, 11], 6] [('C', 'Major'), ('A', 'Major'), [9, 2, 11, 4], 4] [('A', 'Major'), ('G sharp', 'Major'), [8, 1], 2] [('A', 'Major'), ('E flat', 'Major'), [8, 2], 2] [('A', 'Major'), ('C sharp', 'Major'), [8, 1, 6], 3] [('A', 'Major'), ('F', 'Major'), [9, 2, 4], 3] [('A', 'Major'), ('F sharp', 'Major'), [8, 1, 11, 6], 4] [('A', 'Major'), ('C', 'Major'), [9, 2, 11, 4], 4] [('A', 'Major'), ('B', 'Major'), [8, 1, 11, 4, 6], 5] [('A', 'Major'), ('G', 'Major'), [9, 2, 11, 4, 6], 5] [('A', 'Major'), ('E', 'Major'), [1, 4, 6, 8, 9, 11], 6] [('A', 'Major'), ('D', 'Major'), [1, 2, 4, 6, 9, 11], 6] [('C', 'Major'), ('E flat', 'Major'), [0, 2, 5, 7], 4] [('E flat', 'Major'), ('A', 'Major'), [8, 2], 2] [('E flat', 'Major'), ('E', 'Major'), [8, 3], 2] [('E flat', 'Major'), ('D', 'Major'), [2, 7], 2] [('E flat', 'Major'), ('B', 'Major'), [8, 10, 3], 3] [('E flat', 'Major'), ('G', 'Major'), [0, 2, 7], 3] [('E flat', 'Major'), ('F sharp', 'Major'), 
[8, 10, 3, 5], 4] [('E flat', 'Major'), ('C', 'Major'), [0, 2, 5, 7], 4] [('E flat', 'Major'), ('C sharp', 'Major'), [0, 8, 10, 3, 5], 5] [('E flat', 'Major'), ('F', 'Major'), [0, 2, 10, 5, 7], 5] [('E flat', 'Major'), ('B flat', 'Major'), [0, 2, 3, 5, 7, 10], 6] [('E flat', 'Major'), ('G sharp', 'Major'), [0, 3, 5, 7, 8, 10], 6] [('C', 'Major'), ('B flat', 'Major'), [0, 9, 2, 5, 7], 5] [('B flat', 'Major'), ('E', 'Major'), [9, 3], 2] [('B flat', 'Major'), ('F sharp', 'Major'), [10, 3, 5], 3] [('B flat', 'Major'), ('D', 'Major'), [9, 2, 7], 3] [('B flat', 'Major'), ('C sharp', 'Major'), [0, 10, 3, 5], 4] [('B flat', 'Major'), ('G', 'Major'), [0, 9, 2, 7], 4] [('B flat', 'Major'), ('G sharp', 'Major'), [0, 10, 3, 5, 7], 5] [('B flat', 'Major'), ('C', 'Major'), [0, 9, 2, 5, 7], 5] [('B flat', 'Major'), ('E flat', 'Major'), [0, 2, 3, 5, 7, 10], 6] [('B flat', 'Major'), ('F', 'Major'), [0, 2, 5, 7, 9, 10], 6] [('C', 'Major'), ('D', 'Major'), [9, 2, 11, 4, 7], 5] [('D', 'Major'), ('C sharp', 'Major'), [1, 6], 2] [('D', 'Major'), ('G sharp', 'Major'), [1, 7], 2] [('D', 'Major'), ('E flat', 'Major'), [2, 7], 2] [('D', 'Major'), ('B flat', 'Major'), [9, 2, 7], 3] [('D', 'Major'), ('F sharp', 'Major'), [1, 11, 6], 3] [('D', 'Major'), ('B', 'Major'), [1, 11, 4, 6], 4] [('D', 'Major'), ('F', 'Major'), [9, 2, 4, 7], 4] [('D', 'Major'), ('C', 'Major'), [9, 2, 11, 4, 7], 5] [('D', 'Major'), ('E', 'Major'), [1, 11, 4, 6, 9], 5] [('D', 'Major'), ('A', 'Major'), [1, 2, 4, 6, 9, 11], 6] [('D', 'Major'), ('G', 'Major'), [2, 4, 6, 7, 9, 11], 6] [('C', 'Major'), ('G', 'Major'), [0, 2, 4, 7, 9, 11], 6] [('G', 'Major'), ('G sharp', 'Major'), [0, 7], 2] [('G', 'Major'), ('F sharp', 'Major'), [11, 6], 2] [('G', 'Major'), ('B', 'Major'), [11, 4, 6], 3] [('G', 'Major'), ('E flat', 'Major'), [0, 2, 7], 3] [('G', 'Major'), ('B flat', 'Major'), [0, 9, 2, 7], 4] [('G', 'Major'), ('E', 'Major'), [9, 11, 4, 6], 4] [('G', 'Major'), ('A', 'Major'), [9, 2, 11, 4, 6], 5] [('G', 'Major'), ('F', 'Major'), [0, 9, 2, 4, 7], 5] [('G', 'Major'), ('C', 'Major'), [0, 2, 4, 7, 9, 11], 6] [('G', 'Major'), ('D', 'Major'), [2, 4, 6, 7, 9, 11], 6] [('C', 'Major'), ('F', 'Major'), [0, 2, 4, 5, 7, 9], 6] [('F', 'Major'), ('B', 'Major'), [10, 4], 2] [('F', 'Major'), ('F sharp', 'Major'), [10, 5], 2] [('F', 'Major'), ('E', 'Major'), [9, 4], 2] [('F', 'Major'), ('A', 'Major'), [9, 2, 4], 3] [('F', 'Major'), ('C sharp', 'Major'), [0, 10, 5], 3] [('F', 'Major'), ('G sharp', 'Major'), [0, 10, 5, 7], 4] [('F', 'Major'), ('D', 'Major'), [9, 2, 4, 7], 4] [('F', 'Major'), ('G', 'Major'), [0, 9, 2, 4, 7], 5] [('F', 'Major'), ('E flat', 'Major'), [0, 2, 10, 5, 7], 5] [('F', 'Major'), ('B flat', 'Major'), [0, 2, 5, 7, 9, 10], 6] [('F', 'Major'), ('C', 'Major'), [0, 2, 4, 5, 7, 9], 6] [('B', 'Major'), ('F', 'Major'), [10, 4], 2] [('B', 'Major'), ('G', 'Major'), [11, 4, 6], 3] [('B', 'Major'), ('E flat', 'Major'), [8, 10, 3], 3] [('B', 'Major'), ('G sharp', 'Major'), [8, 1, 10, 3], 4] [('B', 'Major'), ('D', 'Major'), [1, 11, 4, 6], 4] [('B', 'Major'), ('A', 'Major'), [8, 1, 11, 4, 6], 5] [('B', 'Major'), ('C sharp', 'Major'), [8, 1, 10, 3, 6], 5] [('C sharp', 'Major'), ('D', 'Major'), [1, 6], 2] [('C sharp', 'Major'), ('A', 'Major'), [8, 1, 6], 3] [('C sharp', 'Major'), ('F', 'Major'), [0, 10, 5], 3] [('C sharp', 'Major'), ('B flat', 'Major'), [0, 10, 3, 5], 4] [('C sharp', 'Major'), ('E', 'Major'), [8, 1, 3, 6], 4] [('C sharp', 'Major'), ('B', 'Major'), [8, 1, 10, 3, 6], 5] [('B', 'Major'), ('F sharp', 'Major'), [1, 3, 6, 8, 10, 11], 6] [('B', 'Major'), 
('E', 'Major'), [1, 3, 4, 6, 8, 11], 6] [('C sharp', 'Major'), ('E flat', 'Major'), [0, 8, 10, 3, 5], 5] [('C sharp', 'Major'), ('G sharp', 'Major'), [0, 1, 3, 5, 8, 10], 6] [('C sharp', 'Major'), ('F sharp', 'Major'), [1, 3, 5, 6, 8, 10], 6]
54.550943
68
0.388766
2,548
14,456
2.205651
0.008634
0.234875
0.086121
0.062633
1
1
1
1
0.985765
0.960142
0
0.120927
0.176259
14,456
264
69
54.757576
0.351025
0
0
1
0
0
0.304372
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
10
bf2aea4d1c025913f325764be4ce4c5069302edc
64
py
Python
mhkit/river/io/__init__.py
cmichelenstrofer/MHKiT-Python
7fda1eb13fc7cec832dc2b556a5bd8bc4d8ea6ce
[ "BSD-3-Clause" ]
3
2020-01-15T16:21:26.000Z
2020-01-28T17:10:13.000Z
mhkit/river/io/__init__.py
cmichelenstrofer/MHKiT-Python
7fda1eb13fc7cec832dc2b556a5bd8bc4d8ea6ce
[ "BSD-3-Clause" ]
null
null
null
mhkit/river/io/__init__.py
cmichelenstrofer/MHKiT-Python
7fda1eb13fc7cec832dc2b556a5bd8bc4d8ea6ce
[ "BSD-3-Clause" ]
4
2020-01-15T16:24:04.000Z
2020-01-15T20:45:22.000Z
from mhkit.river.io import usgs
from mhkit.river.io import d3d
21.333333
31
0.796875
12
64
4.25
0.583333
0.352941
0.54902
0.627451
0.862745
0
0
0
0
0
0
0.018182
0.140625
64
2
32
32
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
9
172d0bd94a1af6dc18fedd9539b8b708ba606cf5
121
py
Python
graphite_feeder/handler/event/enum/clima/__init__.py
majamassarini/automate-graphite-feeder
0f17f99bbdaab86e10e0b7d424d055ff44fc4ca0
[ "MIT" ]
null
null
null
graphite_feeder/handler/event/enum/clima/__init__.py
majamassarini/automate-graphite-feeder
0f17f99bbdaab86e10e0b7d424d055ff44fc4ca0
[ "MIT" ]
null
null
null
graphite_feeder/handler/event/enum/clima/__init__.py
majamassarini/automate-graphite-feeder
0f17f99bbdaab86e10e0b7d424d055ff44fc4ca0
[ "MIT" ]
null
null
null
from graphite_feeder.handler.event.enum.clima import command
from graphite_feeder.handler.event.enum.clima import season
40.333333
60
0.867769
18
121
5.722222
0.555556
0.23301
0.349515
0.485437
0.873786
0.873786
0.873786
0.873786
0
0
0
0
0.066116
121
2
61
60.5
0.911504
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
1
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
12
1739cb8664fb21299aedfc93248d79355c2d6fd1
5,076
py
Python
backend/pollaris/app/migrations/0013_add_constraints.py
Elizabeth-Warren/pollaris
153ca4297ceed1ac1685ca80c5890384800cfa8a
[ "MIT" ]
78
2020-03-27T14:49:11.000Z
2021-11-15T10:24:11.000Z
backend/pollaris/app/migrations/0013_add_constraints.py
Elizabeth-Warren/pollaris
153ca4297ceed1ac1685ca80c5890384800cfa8a
[ "MIT" ]
10
2020-06-06T01:47:56.000Z
2022-02-27T23:34:30.000Z
backend/pollaris/app/migrations/0013_add_constraints.py
Elizabeth-Warren/pollaris
153ca4297ceed1ac1685ca80c5890384800cfa8a
[ "MIT" ]
14
2020-03-27T17:36:30.000Z
2020-05-21T04:50:07.000Z
# Generated by Django 2.2.6 on 2020-01-09 16:38

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("app", "0012_unique_constraints")]

    operations = [
        migrations.AlterField(
            model_name="dropboxlocation",
            name="address",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="dropboxlocation",
            name="city",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="dropboxlocation",
            name="dates_hours",
            field=models.CharField(blank=True, default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="dropboxlocation",
            name="location_name",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="dropboxlocation",
            name="state_code",
            field=models.CharField(
                default="",
                max_length=2,
                validators=[django.core.validators.RegexValidator("^[A-Z]{2}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="dropboxlocation",
            name="zip",
            field=models.CharField(
                default="",
                max_length=5,
                validators=[django.core.validators.RegexValidator("^[0-9]{5}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="address",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="city",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="dates_hours",
            field=models.CharField(blank=True, default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="location_name",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="state_code",
            field=models.CharField(
                default="",
                max_length=2,
                validators=[django.core.validators.RegexValidator("^[A-Z]{2}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="earlyvotelocation",
            name="zip",
            field=models.CharField(
                default="",
                max_length=5,
                validators=[django.core.validators.RegexValidator("^[0-9]{5}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="address",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="city",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="dates_hours",
            field=models.CharField(blank=True, default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="location_name",
            field=models.CharField(default="", max_length=1024),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="state_code",
            field=models.CharField(
                default="",
                max_length=2,
                validators=[django.core.validators.RegexValidator("^[A-Z]{2}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="pollinglocation",
            name="zip",
            field=models.CharField(
                default="",
                max_length=5,
                validators=[django.core.validators.RegexValidator("^[0-9]{5}$")],
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="precinct",
            name="precinct_code",
            field=models.CharField(blank=True, max_length=1024),
        ),
    ]
33.84
81
0.535461
417
5,076
6.354916
0.136691
0.143396
0.179245
0.207925
0.920755
0.909811
0.893208
0.893208
0.893208
0.863396
0
0.026831
0.346533
5,076
149
82
34.067114
0.772083
0.008865
0
0.937063
1
0
0.105985
0.004573
0
0
0
0
0
1
0
false
0
0.013986
0
0.034965
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
bdcc9196d97b804ddd35b3a5b8d301c46ec56dac
102
py
Python
event_chain/app/apis/__init__.py
ArcBlock/event-chain
50a37c76ab094386fc66c985f4174f8dabc98ad5
[ "MIT" ]
null
null
null
event_chain/app/apis/__init__.py
ArcBlock/event-chain
50a37c76ab094386fc66c985f4174f8dabc98ad5
[ "MIT" ]
null
null
null
event_chain/app/apis/__init__.py
ArcBlock/event-chain
50a37c76ab094386fc66c985f4174f8dabc98ad5
[ "MIT" ]
null
null
null
from event_chain.app.apis.ticket import api_ticket
from event_chain.app.apis.mobile import api_mobile
34
50
0.862745
18
102
4.666667
0.5
0.214286
0.333333
0.404762
0.5
0
0
0
0
0
0
0
0.078431
102
2
51
51
0.893617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
da43779bee1964e09f2f635cdf175f70ffcf9518
2,265
py
Python
ZiggeoStreams.py
nraval1729/ZiggeoPythonSdk
5f4774fd505d16d249d43a75b9aab3483d7e892a
[ "Apache-2.0" ]
null
null
null
ZiggeoStreams.py
nraval1729/ZiggeoPythonSdk
5f4774fd505d16d249d43a75b9aab3483d7e892a
[ "Apache-2.0" ]
null
null
null
ZiggeoStreams.py
nraval1729/ZiggeoPythonSdk
5f4774fd505d16d249d43a75b9aab3483d7e892a
[ "Apache-2.0" ]
null
null
null
from builtins import object


class ZiggeoStreams(object):

    def __init__(self, application):
        self.__application = application

    def index(self, video_token_or_key, data = None):
        return self.__application.connect.getJSON('/v1/videos/' + video_token_or_key + '/streams', data)

    def get(self, video_token_or_key, token_or_key):
        return self.__application.connect.getJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '')

    def download_video(self, video_token_or_key, token_or_key):
        return self.__application.connect.get('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/video')

    def download_image(self, video_token_or_key, token_or_key):
        return self.__application.connect.get('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/image')

    def push_to_service(self, video_token_or_key, token_or_key, data = None):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/push', data)

    def delete(self, video_token_or_key, token_or_key):
        return self.__application.connect.delete('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '')

    def create(self, video_token_or_key, data = None, file = None):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams', data, file)

    def attach_image(self, video_token_or_key, token_or_key, data = None, file = None):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/image', data, file)

    def attach_video(self, video_token_or_key, token_or_key, data = None, file = None):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/video', data, file)

    def attach_subtitle(self, video_token_or_key, token_or_key, data = None):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/subtitle', data)

    def bind(self, video_token_or_key, token_or_key):
        return self.__application.connect.postJSON('/v1/videos/' + video_token_or_key + '/streams/' + token_or_key + '/bind')
56.625
138
0.710817
318
2,265
4.632075
0.113208
0.190088
0.271555
0.224033
0.828921
0.828921
0.828921
0.816701
0.81127
0.785472
0
0.005741
0.154084
2,265
39
139
58.076923
0.763048
0
0
0
0
0
0.115283
0
0
0
0
0
0
1
0.461538
false
0
0.038462
0.423077
0.961538
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
11
16f3a2c947fb2cfff372e3cb4375ec2a4f6fc7e5
358
py
Python
ExtractCluster5.py
yasuhikohaga/my_bioinfo_codes
4355e2243761f0388ec2126cef1525d04c492f6f
[ "MIT" ]
null
null
null
ExtractCluster5.py
yasuhikohaga/my_bioinfo_codes
4355e2243761f0388ec2126cef1525d04c492f6f
[ "MIT" ]
null
null
null
ExtractCluster5.py
yasuhikohaga/my_bioinfo_codes
4355e2243761f0388ec2126cef1525d04c492f6f
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import pandas as pd

df = pd.read_csv('/Users/yasuhikohaga/desktop/PC9_GEX-ATAC_exon/wnn_analysis/wnn/フィルタリング変更後_解析/SeuratObjectDataExtraction/metadata.csv')
df = df[df.seurat_clusters == 5]
df.to_csv('/Users/yasuhikohaga/desktop/PC9_GEX-ATAC_exon/wnn_analysis/wnn/フィルタリング変更後_解析/SeuratObjectDataExtraction/metadata_cluster5.csv')
35.8
139
0.818436
52
358
5.403846
0.557692
0.05694
0.142349
0.192171
0.718861
0.718861
0.718861
0.718861
0.718861
0.718861
0
0.014749
0.053073
358
9
140
39.777778
0.814159
0.058659
0
0
0
0.5
0.723214
0.723214
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
1
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
e529404d2dab638f763a453d2960e3d033846c82
87,819
py
Python
kit_imagenet.py
pmathewjacob/insightface-attendance
447b669e3d176bb1c78a6108334d6470a8fb25a8
[ "MIT" ]
5
2019-12-02T03:32:29.000Z
2020-06-14T19:22:40.000Z
kit_imagenet.py
pmathewjacob/insightface-attendance
447b669e3d176bb1c78a6108334d6470a8fb25a8
[ "MIT" ]
13
2020-03-24T17:53:27.000Z
2022-02-10T00:49:34.000Z
kit_imagenet.py
pmathewjacob/insightface-attendance
447b669e3d176bb1c78a6108334d6470a8fb25a8
[ "MIT" ]
6
2019-12-02T16:35:57.000Z
2020-08-21T08:44:27.000Z
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import math __weights_dict = dict() def load_weights(weight_file): if weight_file == None: return try: weights_dict = np.load(weight_file, allow_pickle=True).item() except: weights_dict = np.load(weight_file, encoding='bytes', allow_pickle=True).item() return weights_dict class KitModel(nn.Module): def __init__(self, weight_file): super(KitModel, self).__init__() global __weights_dict __weights_dict = load_weights(weight_file) self.vargface_head_conv1 = self.__conv(2, name='vargface_head_conv1', in_channels=3, out_channels=40, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False) self.vargface_head_conv1_bn = self.__batch_normalization(2, 'vargface_head_conv1_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_head_head_pooling_sep1_data_conv2d_depthwise', in_channels=40, out_channels=40, kernel_size=(3, 3), stride=(2, 2), groups=5, bias=False) self.vargface_head_head_pooling_shortcut_conv2d_depthwise = self.__conv(2, name='vargface_head_head_pooling_shortcut_conv2d_depthwise', in_channels=40, out_channels=40, kernel_size=(3, 3), stride=(2, 2), groups=5, bias=False) self.vargface_head_head_pooling_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_sep1_data_conv2d_depthwise_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_shortcut_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_shortcut_conv2d_depthwise_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_head_head_pooling_sep1_data_conv2d_pointwise', in_channels=40, out_channels=40, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_head_head_pooling_shortcut_conv2d_pointwise = self.__conv(2, name='vargface_head_head_pooling_shortcut_conv2d_pointwise', in_channels=40, out_channels=40, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_head_head_pooling_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_sep1_data_conv2d_pointwise_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_shortcut_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_shortcut_conv2d_pointwise_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_head_head_pooling_sep2_data_conv2d_depthwise', in_channels=40, out_channels=40, kernel_size=(3, 3), stride=(1, 1), groups=5, bias=False) self.vargface_head_head_pooling_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_sep2_data_conv2d_depthwise_bn', num_features=40, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_head_head_pooling_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_head_head_pooling_sep2_data_conv2d_pointwise', in_channels=40, out_channels=40, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_head_head_pooling_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_head_head_pooling_sep2_data_conv2d_pointwise_bn', num_features=40, eps=1.9999999494757503e-05, 
momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise', in_channels=40, out_channels=80, kernel_size=(3, 3), stride=(2, 2), groups=5, bias=False) self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise', in_channels=40, out_channels=80, kernel_size=(3, 3), stride=(2, 2), groups=5, bias=False) self.vargface_stage_2_unit_1_shortcut_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_1_shortcut_conv2d_depthwise', in_channels=40, out_channels=80, kernel_size=(3, 3), stride=(2, 2), groups=5, bias=False) self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_shortcut_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_shortcut_conv2d_depthwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise', in_channels=80, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise', in_channels=80, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_1_shortcut_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_1_shortcut_conv2d_pointwise', in_channels=80, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_shortcut_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_shortcut_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_1_sep2_data_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=10, bias=False) self.vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_1_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_1_sep2_data_conv2d_pointwise', in_channels=160, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_1_sep2_data_conv2d_pointwise_bn = 
self.__batch_normalization(2, 'vargface_stage_2_unit_1_sep2_data_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_2_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_2_sep1_data_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=10, bias=False) self.vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_2_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_2_sep1_data_conv2d_pointwise', in_channels=160, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_2_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_2_sep2_data_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=10, bias=False) self.vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_2_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_2_sep2_data_conv2d_pointwise', in_channels=160, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_2_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_2_sep2_data_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_3_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_3_sep1_data_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=10, bias=False) self.vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_3_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_3_sep1_data_conv2d_pointwise', in_channels=160, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_3_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_2_unit_3_sep2_data_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=10, bias=False) self.vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_2_unit_3_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_2_unit_3_sep2_data_conv2d_pointwise', in_channels=160, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) 
self.vargface_stage_2_unit_3_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_2_unit_3_sep2_data_conv2d_pointwise_bn', num_features=80, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(2, 2), groups=10, bias=False) self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(2, 2), groups=10, bias=False) self.vargface_stage_3_unit_1_shortcut_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_1_shortcut_conv2d_depthwise', in_channels=80, out_channels=160, kernel_size=(3, 3), stride=(2, 2), groups=10, bias=False) self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_shortcut_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_shortcut_conv2d_depthwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise', in_channels=160, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise', in_channels=160, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_1_shortcut_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_1_shortcut_conv2d_pointwise', in_channels=160, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_shortcut_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_shortcut_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_1_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_1_sep2_data_conv2d_pointwise = self.__conv(2, 
name='vargface_stage_3_unit_1_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_1_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_1_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_2_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_2_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_2_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_2_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_2_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_2_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_2_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_2_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_2_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_2_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_3_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_3_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_3_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_3_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_3_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_3_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) 
self.vargface_stage_3_unit_3_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_3_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_3_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_3_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_4_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_4_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_4_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_4_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_4_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_4_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_4_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_4_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_4_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_4_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_5_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_5_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_5_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_5_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_5_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_5_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, 
momentum=0.8999999761581421) self.vargface_stage_3_unit_5_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_5_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_5_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_5_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_6_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_6_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_6_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_6_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_6_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_6_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_6_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_6_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_6_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_6_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_7_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_7_sep1_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_7_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_7_sep1_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_7_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_3_unit_7_sep2_data_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=20, bias=False) self.vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_bn', num_features=320, 
eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_3_unit_7_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_3_unit_7_sep2_data_conv2d_pointwise', in_channels=320, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_3_unit_7_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_3_unit_7_sep2_data_conv2d_pointwise_bn', num_features=160, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(2, 2), groups=20, bias=False) self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(2, 2), groups=20, bias=False) self.vargface_stage_4_unit_1_shortcut_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_1_shortcut_conv2d_depthwise', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(2, 2), groups=20, bias=False) self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_shortcut_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_shortcut_conv2d_depthwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise', in_channels=320, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise', in_channels=320, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_1_shortcut_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_1_shortcut_conv2d_pointwise', in_channels=320, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_shortcut_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_shortcut_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_1_sep2_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False) 
self.vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_1_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_1_sep2_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_1_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_1_sep2_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_2_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_2_sep1_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False) self.vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_2_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_2_sep1_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_2_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_2_sep2_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False) self.vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_2_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_2_sep2_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_2_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_2_sep2_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_3_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_3_sep1_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False) self.vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_3_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_3_sep1_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False) self.vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421) self.vargface_stage_4_unit_3_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_3_sep2_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 
1), groups=40, bias=False)
        self.vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.vargface_stage_4_unit_3_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_3_sep2_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.vargface_stage_4_unit_3_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_3_sep2_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.vargface_stage_4_unit_4_sep1_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_4_sep1_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False)
        self.vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.vargface_stage_4_unit_4_sep1_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_4_sep1_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.vargface_stage_4_unit_4_sep2_data_conv2d_depthwise = self.__conv(2, name='vargface_stage_4_unit_4_sep2_data_conv2d_depthwise', in_channels=320, out_channels=640, kernel_size=(3, 3), stride=(1, 1), groups=40, bias=False)
        self.vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_bn', num_features=640, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.vargface_stage_4_unit_4_sep2_data_conv2d_pointwise = self.__conv(2, name='vargface_stage_4_unit_4_sep2_data_conv2d_pointwise', in_channels=640, out_channels=320, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.vargface_stage_4_unit_4_sep2_data_conv2d_pointwise_bn = self.__batch_normalization(2, 'vargface_stage_4_unit_4_sep2_data_conv2d_pointwise_bn', num_features=320, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.embed_convx = self.__conv(2, name='embed_convx', in_channels=320, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.embed_convx_bn = self.__batch_normalization(2, 'embed_convx_bn', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.embed_convx_depthwise = self.__conv(2, name='embed_convx_depthwise', in_channels=1024, out_channels=1024, kernel_size=(7, 7), stride=(1, 1), groups=128, bias=False)
        self.embed_convx_depthwise_bn = self.__batch_normalization(2, 'embed_convx_depthwise_bn', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.embed_convx_pointwise = self.__conv(2, name='embed_convx_pointwise', in_channels=1024, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.embed_convx_pointwise_bn = self.__batch_normalization(2, 'embed_convx_pointwise_bn', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
        self.pre_fc1 = self.__dense(name = 'pre_fc1', in_features = 512, out_features = 512, bias = True)
        self.fc1 = self.__batch_normalization(0, 'fc1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)

    def forward(self, x):
        self.minusscalar0_second = torch.autograd.Variable(torch.from_numpy(__weights_dict['minusscalar0_second']['value']), requires_grad=False)
        self.mulscalar0_second = torch.autograd.Variable(torch.from_numpy(__weights_dict['mulscalar0_second']['value']), requires_grad=False)
        minusscalar0 = x - self.minusscalar0_second
        mulscalar0 = minusscalar0 * self.mulscalar0_second
        vargface_head_conv1_pad = F.pad(mulscalar0, (1, 1, 1, 1))
        vargface_head_conv1 = self.vargface_head_conv1(vargface_head_conv1_pad)
        vargface_head_conv1_bn = self.vargface_head_conv1_bn(vargface_head_conv1)
        vargface_head_conv1_act = F.prelu(vargface_head_conv1_bn, torch.from_numpy(__weights_dict['vargface_head_conv1_act']['weights']))
        vargface_head_head_pooling_sep1_data_conv2d_depthwise_pad = F.pad(vargface_head_conv1_act, (1, 1, 1, 1))
        vargface_head_head_pooling_sep1_data_conv2d_depthwise = self.vargface_head_head_pooling_sep1_data_conv2d_depthwise(vargface_head_head_pooling_sep1_data_conv2d_depthwise_pad)
        vargface_head_head_pooling_shortcut_conv2d_depthwise_pad = F.pad(vargface_head_conv1_act, (1, 1, 1, 1))
        vargface_head_head_pooling_shortcut_conv2d_depthwise = self.vargface_head_head_pooling_shortcut_conv2d_depthwise(vargface_head_head_pooling_shortcut_conv2d_depthwise_pad)
        vargface_head_head_pooling_sep1_data_conv2d_depthwise_bn = self.vargface_head_head_pooling_sep1_data_conv2d_depthwise_bn(vargface_head_head_pooling_sep1_data_conv2d_depthwise)
        vargface_head_head_pooling_shortcut_conv2d_depthwise_bn = self.vargface_head_head_pooling_shortcut_conv2d_depthwise_bn(vargface_head_head_pooling_shortcut_conv2d_depthwise)
        vargface_head_head_pooling_sep1_data_conv2d_depthwise_act = F.prelu(vargface_head_head_pooling_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_head_head_pooling_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_head_head_pooling_shortcut_conv2d_depthwise_act = F.prelu(vargface_head_head_pooling_shortcut_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_head_head_pooling_shortcut_conv2d_depthwise_act']['weights']))
        vargface_head_head_pooling_sep1_data_conv2d_pointwise = self.vargface_head_head_pooling_sep1_data_conv2d_pointwise(vargface_head_head_pooling_sep1_data_conv2d_depthwise_act)
        vargface_head_head_pooling_shortcut_conv2d_pointwise = self.vargface_head_head_pooling_shortcut_conv2d_pointwise(vargface_head_head_pooling_shortcut_conv2d_depthwise_act)
        vargface_head_head_pooling_sep1_data_conv2d_pointwise_bn = self.vargface_head_head_pooling_sep1_data_conv2d_pointwise_bn(vargface_head_head_pooling_sep1_data_conv2d_pointwise)
        vargface_head_head_pooling_shortcut_conv2d_pointwise_bn = self.vargface_head_head_pooling_shortcut_conv2d_pointwise_bn(vargface_head_head_pooling_shortcut_conv2d_pointwise)
        vargface_head_head_pooling_sep1_data_conv2d_pointwise_act = F.prelu(vargface_head_head_pooling_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_head_head_pooling_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_head_head_pooling_sep2_data_conv2d_depthwise_pad = F.pad(vargface_head_head_pooling_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_head_head_pooling_sep2_data_conv2d_depthwise = self.vargface_head_head_pooling_sep2_data_conv2d_depthwise(vargface_head_head_pooling_sep2_data_conv2d_depthwise_pad)
        vargface_head_head_pooling_sep2_data_conv2d_depthwise_bn = self.vargface_head_head_pooling_sep2_data_conv2d_depthwise_bn(vargface_head_head_pooling_sep2_data_conv2d_depthwise)
        vargface_head_head_pooling_sep2_data_conv2d_depthwise_act = F.prelu(vargface_head_head_pooling_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_head_head_pooling_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_head_head_pooling_sep2_data_conv2d_pointwise = self.vargface_head_head_pooling_sep2_data_conv2d_pointwise(vargface_head_head_pooling_sep2_data_conv2d_depthwise_act)
        vargface_head_head_pooling_sep2_data_conv2d_pointwise_bn = self.vargface_head_head_pooling_sep2_data_conv2d_pointwise_bn(vargface_head_head_pooling_sep2_data_conv2d_pointwise)
        plus0 = vargface_head_head_pooling_sep2_data_conv2d_pointwise_bn + vargface_head_head_pooling_shortcut_conv2d_pointwise_bn
        vargface_head_head_pooling_out_data_act = F.prelu(plus0, torch.from_numpy(__weights_dict['vargface_head_head_pooling_out_data_act']['weights']))
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_pad = F.pad(vargface_head_head_pooling_out_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise = self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise(vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_pad)
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_pad = F.pad(vargface_head_head_pooling_out_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise = self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise(vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_pad)
        vargface_stage_2_unit_1_shortcut_conv2d_depthwise_pad = F.pad(vargface_head_head_pooling_out_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_1_shortcut_conv2d_depthwise = self.vargface_stage_2_unit_1_shortcut_conv2d_depthwise(vargface_stage_2_unit_1_shortcut_conv2d_depthwise_pad)
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_bn(vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise)
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_bn(vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise)
        vargface_stage_2_unit_1_shortcut_conv2d_depthwise_bn = self.vargface_stage_2_unit_1_shortcut_conv2d_depthwise_bn(vargface_stage_2_unit_1_shortcut_conv2d_depthwise)
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_1_shortcut_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_1_shortcut_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_shortcut_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise = self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise(vargface_stage_2_unit_1_sep1_data_branch_conv2d_depthwise_act)
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise = self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise(vargface_stage_2_unit_1_sep2_data_branch_conv2d_depthwise_act)
        vargface_stage_2_unit_1_shortcut_conv2d_pointwise = self.vargface_stage_2_unit_1_shortcut_conv2d_pointwise(vargface_stage_2_unit_1_shortcut_conv2d_depthwise_act)
        vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise_bn(vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise)
        vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise_bn(vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise)
        vargface_stage_2_unit_1_shortcut_conv2d_pointwise_bn = self.vargface_stage_2_unit_1_shortcut_conv2d_pointwise_bn(vargface_stage_2_unit_1_shortcut_conv2d_pointwise)
        plus1 = vargface_stage_2_unit_1_sep1_data_branch_conv2d_pointwise_bn + vargface_stage_2_unit_1_sep2_data_branch_conv2d_pointwise_bn
        vargface_stage_2_unit_1_sep1_data_act = F.prelu(plus1, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_sep1_data_act']['weights']))
        vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_1_sep1_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_1_sep2_data_conv2d_depthwise = self.vargface_stage_2_unit_1_sep2_data_conv2d_depthwise(vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_pad)
        vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_bn = self.vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_bn(vargface_stage_2_unit_1_sep2_data_conv2d_depthwise)
        vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_1_sep2_data_conv2d_pointwise = self.vargface_stage_2_unit_1_sep2_data_conv2d_pointwise(vargface_stage_2_unit_1_sep2_data_conv2d_depthwise_act)
        vargface_stage_2_unit_1_sep2_data_conv2d_pointwise_bn = self.vargface_stage_2_unit_1_sep2_data_conv2d_pointwise_bn(vargface_stage_2_unit_1_sep2_data_conv2d_pointwise)
        plus2 = vargface_stage_2_unit_1_sep2_data_conv2d_pointwise_bn + vargface_stage_2_unit_1_shortcut_conv2d_pointwise_bn
        vargface_stage_2_unit_1_out_data_act = F.prelu(plus2, torch.from_numpy(__weights_dict['vargface_stage_2_unit_1_out_data_act']['weights']))
        vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_1_out_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_2_sep1_data_conv2d_depthwise = self.vargface_stage_2_unit_2_sep1_data_conv2d_depthwise(vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_pad)
        vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_bn = self.vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_bn(vargface_stage_2_unit_2_sep1_data_conv2d_depthwise)
        vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_2_sep1_data_conv2d_pointwise = self.vargface_stage_2_unit_2_sep1_data_conv2d_pointwise(vargface_stage_2_unit_2_sep1_data_conv2d_depthwise_act)
        vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_bn = self.vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_bn(vargface_stage_2_unit_2_sep1_data_conv2d_pointwise)
        vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_2_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_2_unit_2_sep2_data_conv2d_depthwise = self.vargface_stage_2_unit_2_sep2_data_conv2d_depthwise(vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_pad)
        vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_bn = self.vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_bn(vargface_stage_2_unit_2_sep2_data_conv2d_depthwise)
        vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_2_sep2_data_conv2d_pointwise = self.vargface_stage_2_unit_2_sep2_data_conv2d_pointwise(vargface_stage_2_unit_2_sep2_data_conv2d_depthwise_act)
        vargface_stage_2_unit_2_sep2_data_conv2d_pointwise_bn = self.vargface_stage_2_unit_2_sep2_data_conv2d_pointwise_bn(vargface_stage_2_unit_2_sep2_data_conv2d_pointwise)
        plus3 = vargface_stage_2_unit_2_sep2_data_conv2d_pointwise_bn + vargface_stage_2_unit_1_out_data_act
        vargface_stage_2_unit_2_out_data_act = F.prelu(plus3, torch.from_numpy(__weights_dict['vargface_stage_2_unit_2_out_data_act']['weights']))
        vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_2_out_data_act, (1, 1, 1, 1))
        vargface_stage_2_unit_3_sep1_data_conv2d_depthwise = self.vargface_stage_2_unit_3_sep1_data_conv2d_depthwise(vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_pad)
        vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_bn = self.vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_bn(vargface_stage_2_unit_3_sep1_data_conv2d_depthwise)
        vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_3_sep1_data_conv2d_pointwise = self.vargface_stage_2_unit_3_sep1_data_conv2d_pointwise(vargface_stage_2_unit_3_sep1_data_conv2d_depthwise_act)
        vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_bn = self.vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_bn(vargface_stage_2_unit_3_sep1_data_conv2d_pointwise)
        vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_3_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_2_unit_3_sep2_data_conv2d_depthwise = self.vargface_stage_2_unit_3_sep2_data_conv2d_depthwise(vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_pad)
        vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_bn = self.vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_bn(vargface_stage_2_unit_3_sep2_data_conv2d_depthwise)
        vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_2_unit_3_sep2_data_conv2d_pointwise = self.vargface_stage_2_unit_3_sep2_data_conv2d_pointwise(vargface_stage_2_unit_3_sep2_data_conv2d_depthwise_act)
        vargface_stage_2_unit_3_sep2_data_conv2d_pointwise_bn = self.vargface_stage_2_unit_3_sep2_data_conv2d_pointwise_bn(vargface_stage_2_unit_3_sep2_data_conv2d_pointwise)
        plus4 = vargface_stage_2_unit_3_sep2_data_conv2d_pointwise_bn + vargface_stage_2_unit_2_out_data_act
        vargface_stage_2_unit_3_out_data_act = F.prelu(plus4, torch.from_numpy(__weights_dict['vargface_stage_2_unit_3_out_data_act']['weights']))
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_3_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise = self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise(vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_pad)
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_3_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise = self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise(vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_pad)
        vargface_stage_3_unit_1_shortcut_conv2d_depthwise_pad = F.pad(vargface_stage_2_unit_3_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_1_shortcut_conv2d_depthwise = self.vargface_stage_3_unit_1_shortcut_conv2d_depthwise(vargface_stage_3_unit_1_shortcut_conv2d_depthwise_pad)
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_bn(vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise)
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_bn(vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise)
        vargface_stage_3_unit_1_shortcut_conv2d_depthwise_bn = self.vargface_stage_3_unit_1_shortcut_conv2d_depthwise_bn(vargface_stage_3_unit_1_shortcut_conv2d_depthwise)
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_1_shortcut_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_1_shortcut_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_shortcut_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise = self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise(vargface_stage_3_unit_1_sep1_data_branch_conv2d_depthwise_act)
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise = self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise(vargface_stage_3_unit_1_sep2_data_branch_conv2d_depthwise_act)
        vargface_stage_3_unit_1_shortcut_conv2d_pointwise = self.vargface_stage_3_unit_1_shortcut_conv2d_pointwise(vargface_stage_3_unit_1_shortcut_conv2d_depthwise_act)
        vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise_bn(vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise)
        vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise_bn(vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise)
        vargface_stage_3_unit_1_shortcut_conv2d_pointwise_bn = self.vargface_stage_3_unit_1_shortcut_conv2d_pointwise_bn(vargface_stage_3_unit_1_shortcut_conv2d_pointwise)
        plus5 = vargface_stage_3_unit_1_sep1_data_branch_conv2d_pointwise_bn + vargface_stage_3_unit_1_sep2_data_branch_conv2d_pointwise_bn
        vargface_stage_3_unit_1_sep1_data_act = F.prelu(plus5, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_sep1_data_act']['weights']))
        vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_1_sep1_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_1_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_1_sep2_data_conv2d_depthwise(vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_1_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_1_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_1_sep2_data_conv2d_pointwise(vargface_stage_3_unit_1_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_1_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_1_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_1_sep2_data_conv2d_pointwise)
        plus6 = vargface_stage_3_unit_1_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_1_shortcut_conv2d_pointwise_bn
        vargface_stage_3_unit_1_out_data_act = F.prelu(plus6, torch.from_numpy(__weights_dict['vargface_stage_3_unit_1_out_data_act']['weights']))
        vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_1_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_2_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_2_sep1_data_conv2d_depthwise(vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_2_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_2_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_2_sep1_data_conv2d_pointwise(vargface_stage_3_unit_2_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_2_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_2_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_2_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_2_sep2_data_conv2d_depthwise(vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_2_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_2_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_2_sep2_data_conv2d_pointwise(vargface_stage_3_unit_2_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_2_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_2_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_2_sep2_data_conv2d_pointwise)
        plus7 = vargface_stage_3_unit_2_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_1_out_data_act
        vargface_stage_3_unit_2_out_data_act = F.prelu(plus7, torch.from_numpy(__weights_dict['vargface_stage_3_unit_2_out_data_act']['weights']))
        vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_2_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_3_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_3_sep1_data_conv2d_depthwise(vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_3_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_3_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_3_sep1_data_conv2d_pointwise(vargface_stage_3_unit_3_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_3_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_3_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_3_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_3_sep2_data_conv2d_depthwise(vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_3_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_3_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_3_sep2_data_conv2d_pointwise(vargface_stage_3_unit_3_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_3_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_3_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_3_sep2_data_conv2d_pointwise)
        plus8 = vargface_stage_3_unit_3_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_2_out_data_act
        vargface_stage_3_unit_3_out_data_act = F.prelu(plus8, torch.from_numpy(__weights_dict['vargface_stage_3_unit_3_out_data_act']['weights']))
        vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_3_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_4_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_4_sep1_data_conv2d_depthwise(vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_4_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_4_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_4_sep1_data_conv2d_pointwise(vargface_stage_3_unit_4_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_4_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_4_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_4_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_4_sep2_data_conv2d_depthwise(vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_4_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_4_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_4_sep2_data_conv2d_pointwise(vargface_stage_3_unit_4_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_4_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_4_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_4_sep2_data_conv2d_pointwise)
        plus9 = vargface_stage_3_unit_4_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_3_out_data_act
        vargface_stage_3_unit_4_out_data_act = F.prelu(plus9, torch.from_numpy(__weights_dict['vargface_stage_3_unit_4_out_data_act']['weights']))
        vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_4_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_5_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_5_sep1_data_conv2d_depthwise(vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_5_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_5_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_5_sep1_data_conv2d_pointwise(vargface_stage_3_unit_5_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_5_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_5_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_5_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_5_sep2_data_conv2d_depthwise(vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_5_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_5_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_5_sep2_data_conv2d_pointwise(vargface_stage_3_unit_5_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_5_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_5_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_5_sep2_data_conv2d_pointwise)
        plus10 = vargface_stage_3_unit_5_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_4_out_data_act
        vargface_stage_3_unit_5_out_data_act = F.prelu(plus10, torch.from_numpy(__weights_dict['vargface_stage_3_unit_5_out_data_act']['weights']))
        vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_5_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_6_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_6_sep1_data_conv2d_depthwise(vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_6_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_6_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_6_sep1_data_conv2d_pointwise(vargface_stage_3_unit_6_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_6_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_6_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_6_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_6_sep2_data_conv2d_depthwise(vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_6_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_6_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_6_sep2_data_conv2d_pointwise(vargface_stage_3_unit_6_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_6_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_6_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_6_sep2_data_conv2d_pointwise)
        plus11 = vargface_stage_3_unit_6_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_5_out_data_act
        vargface_stage_3_unit_6_out_data_act = F.prelu(plus11, torch.from_numpy(__weights_dict['vargface_stage_3_unit_6_out_data_act']['weights']))
        vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_6_out_data_act, (1, 1, 1, 1))
        vargface_stage_3_unit_7_sep1_data_conv2d_depthwise = self.vargface_stage_3_unit_7_sep1_data_conv2d_depthwise(vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_bn(vargface_stage_3_unit_7_sep1_data_conv2d_depthwise)
        vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_7_sep1_data_conv2d_pointwise = self.vargface_stage_3_unit_7_sep1_data_conv2d_pointwise(vargface_stage_3_unit_7_sep1_data_conv2d_depthwise_act)
        vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_bn(vargface_stage_3_unit_7_sep1_data_conv2d_pointwise)
        vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_7_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_3_unit_7_sep2_data_conv2d_depthwise = self.vargface_stage_3_unit_7_sep2_data_conv2d_depthwise(vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_pad)
        vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_bn = self.vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_bn(vargface_stage_3_unit_7_sep2_data_conv2d_depthwise)
        vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_3_unit_7_sep2_data_conv2d_pointwise = self.vargface_stage_3_unit_7_sep2_data_conv2d_pointwise(vargface_stage_3_unit_7_sep2_data_conv2d_depthwise_act)
        vargface_stage_3_unit_7_sep2_data_conv2d_pointwise_bn = self.vargface_stage_3_unit_7_sep2_data_conv2d_pointwise_bn(vargface_stage_3_unit_7_sep2_data_conv2d_pointwise)
        plus12 = vargface_stage_3_unit_7_sep2_data_conv2d_pointwise_bn + vargface_stage_3_unit_6_out_data_act
        vargface_stage_3_unit_7_out_data_act = F.prelu(plus12, torch.from_numpy(__weights_dict['vargface_stage_3_unit_7_out_data_act']['weights']))
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_7_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise = self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise(vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_pad)
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_7_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise = self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise(vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_pad)
        vargface_stage_4_unit_1_shortcut_conv2d_depthwise_pad = F.pad(vargface_stage_3_unit_7_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_1_shortcut_conv2d_depthwise = self.vargface_stage_4_unit_1_shortcut_conv2d_depthwise(vargface_stage_4_unit_1_shortcut_conv2d_depthwise_pad)
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_bn = self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_bn(vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise)
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_bn = self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_bn(vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise)
        vargface_stage_4_unit_1_shortcut_conv2d_depthwise_bn = self.vargface_stage_4_unit_1_shortcut_conv2d_depthwise_bn(vargface_stage_4_unit_1_shortcut_conv2d_depthwise)
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_1_shortcut_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_1_shortcut_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_shortcut_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise = self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise(vargface_stage_4_unit_1_sep1_data_branch_conv2d_depthwise_act)
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise = self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise(vargface_stage_4_unit_1_sep2_data_branch_conv2d_depthwise_act)
        vargface_stage_4_unit_1_shortcut_conv2d_pointwise = self.vargface_stage_4_unit_1_shortcut_conv2d_pointwise(vargface_stage_4_unit_1_shortcut_conv2d_depthwise_act)
        vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise_bn = self.vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise_bn(vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise)
        vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise_bn = self.vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise_bn(vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise)
        vargface_stage_4_unit_1_shortcut_conv2d_pointwise_bn = self.vargface_stage_4_unit_1_shortcut_conv2d_pointwise_bn(vargface_stage_4_unit_1_shortcut_conv2d_pointwise)
        plus13 = vargface_stage_4_unit_1_sep1_data_branch_conv2d_pointwise_bn + vargface_stage_4_unit_1_sep2_data_branch_conv2d_pointwise_bn
        vargface_stage_4_unit_1_sep1_data_act = F.prelu(plus13, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_sep1_data_act']['weights']))
        vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_1_sep1_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_1_sep2_data_conv2d_depthwise = self.vargface_stage_4_unit_1_sep2_data_conv2d_depthwise(vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_bn(vargface_stage_4_unit_1_sep2_data_conv2d_depthwise)
        vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_1_sep2_data_conv2d_pointwise = self.vargface_stage_4_unit_1_sep2_data_conv2d_pointwise(vargface_stage_4_unit_1_sep2_data_conv2d_depthwise_act)
        vargface_stage_4_unit_1_sep2_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_1_sep2_data_conv2d_pointwise_bn(vargface_stage_4_unit_1_sep2_data_conv2d_pointwise)
        plus14 = vargface_stage_4_unit_1_sep2_data_conv2d_pointwise_bn + vargface_stage_4_unit_1_shortcut_conv2d_pointwise_bn
        vargface_stage_4_unit_1_out_data_act = F.prelu(plus14, torch.from_numpy(__weights_dict['vargface_stage_4_unit_1_out_data_act']['weights']))
        vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_1_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_2_sep1_data_conv2d_depthwise = self.vargface_stage_4_unit_2_sep1_data_conv2d_depthwise(vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_bn(vargface_stage_4_unit_2_sep1_data_conv2d_depthwise)
        vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_2_sep1_data_conv2d_pointwise = self.vargface_stage_4_unit_2_sep1_data_conv2d_pointwise(vargface_stage_4_unit_2_sep1_data_conv2d_depthwise_act)
        vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_bn(vargface_stage_4_unit_2_sep1_data_conv2d_pointwise)
        vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_2_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_4_unit_2_sep2_data_conv2d_depthwise = self.vargface_stage_4_unit_2_sep2_data_conv2d_depthwise(vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_bn(vargface_stage_4_unit_2_sep2_data_conv2d_depthwise)
        vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_2_sep2_data_conv2d_pointwise = self.vargface_stage_4_unit_2_sep2_data_conv2d_pointwise(vargface_stage_4_unit_2_sep2_data_conv2d_depthwise_act)
        vargface_stage_4_unit_2_sep2_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_2_sep2_data_conv2d_pointwise_bn(vargface_stage_4_unit_2_sep2_data_conv2d_pointwise)
        plus15 = vargface_stage_4_unit_2_sep2_data_conv2d_pointwise_bn + vargface_stage_4_unit_1_out_data_act
        vargface_stage_4_unit_2_out_data_act = F.prelu(plus15, torch.from_numpy(__weights_dict['vargface_stage_4_unit_2_out_data_act']['weights']))
        vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_2_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_3_sep1_data_conv2d_depthwise = self.vargface_stage_4_unit_3_sep1_data_conv2d_depthwise(vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_bn(vargface_stage_4_unit_3_sep1_data_conv2d_depthwise)
        vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_3_sep1_data_conv2d_pointwise = self.vargface_stage_4_unit_3_sep1_data_conv2d_pointwise(vargface_stage_4_unit_3_sep1_data_conv2d_depthwise_act)
        vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_bn(vargface_stage_4_unit_3_sep1_data_conv2d_pointwise)
        vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_3_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_4_unit_3_sep2_data_conv2d_depthwise = self.vargface_stage_4_unit_3_sep2_data_conv2d_depthwise(vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_bn(vargface_stage_4_unit_3_sep2_data_conv2d_depthwise)
        vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_3_sep2_data_conv2d_pointwise = self.vargface_stage_4_unit_3_sep2_data_conv2d_pointwise(vargface_stage_4_unit_3_sep2_data_conv2d_depthwise_act)
        vargface_stage_4_unit_3_sep2_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_3_sep2_data_conv2d_pointwise_bn(vargface_stage_4_unit_3_sep2_data_conv2d_pointwise)
        plus16 = vargface_stage_4_unit_3_sep2_data_conv2d_pointwise_bn + vargface_stage_4_unit_2_out_data_act
        vargface_stage_4_unit_3_out_data_act = F.prelu(plus16, torch.from_numpy(__weights_dict['vargface_stage_4_unit_3_out_data_act']['weights']))
        vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_3_out_data_act, (1, 1, 1, 1))
        vargface_stage_4_unit_4_sep1_data_conv2d_depthwise = self.vargface_stage_4_unit_4_sep1_data_conv2d_depthwise(vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_bn(vargface_stage_4_unit_4_sep1_data_conv2d_depthwise)
        vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_4_sep1_data_conv2d_pointwise = self.vargface_stage_4_unit_4_sep1_data_conv2d_pointwise(vargface_stage_4_unit_4_sep1_data_conv2d_depthwise_act)
        vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_bn(vargface_stage_4_unit_4_sep1_data_conv2d_pointwise)
        vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_act = F.prelu(vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_act']['weights']))
        vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_pad = F.pad(vargface_stage_4_unit_4_sep1_data_conv2d_pointwise_act, (1, 1, 1, 1))
        vargface_stage_4_unit_4_sep2_data_conv2d_depthwise = self.vargface_stage_4_unit_4_sep2_data_conv2d_depthwise(vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_pad)
        vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_bn = self.vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_bn(vargface_stage_4_unit_4_sep2_data_conv2d_depthwise)
        vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_act = F.prelu(vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_bn, torch.from_numpy(__weights_dict['vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_act']['weights']))
        vargface_stage_4_unit_4_sep2_data_conv2d_pointwise = self.vargface_stage_4_unit_4_sep2_data_conv2d_pointwise(vargface_stage_4_unit_4_sep2_data_conv2d_depthwise_act)
        vargface_stage_4_unit_4_sep2_data_conv2d_pointwise_bn = self.vargface_stage_4_unit_4_sep2_data_conv2d_pointwise_bn(vargface_stage_4_unit_4_sep2_data_conv2d_pointwise)
        plus17 = vargface_stage_4_unit_4_sep2_data_conv2d_pointwise_bn + vargface_stage_4_unit_3_out_data_act
        vargface_stage_4_unit_4_out_data_act = F.prelu(plus17, torch.from_numpy(__weights_dict['vargface_stage_4_unit_4_out_data_act']['weights']))
        embed_convx = self.embed_convx(vargface_stage_4_unit_4_out_data_act)
        embed_convx_bn = self.embed_convx_bn(embed_convx)
        embed_convx_act = F.prelu(embed_convx_bn, torch.from_numpy(__weights_dict['embed_convx_act']['weights']))
        embed_convx_depthwise = self.embed_convx_depthwise(embed_convx_act)
        embed_convx_depthwise_bn = self.embed_convx_depthwise_bn(embed_convx_depthwise)
        embed_convx_pointwise = self.embed_convx_pointwise(embed_convx_depthwise_bn)
        embed_convx_pointwise_bn = self.embed_convx_pointwise_bn(embed_convx_pointwise)
        embed_convx_pointwise_act = F.prelu(embed_convx_pointwise_bn, torch.from_numpy(__weights_dict['embed_convx_pointwise_act']['weights']))
        pre_fc1 = self.pre_fc1(embed_convx_pointwise_act.view(embed_convx_pointwise_act.size(0), -1))
        fc1 = self.fc1(pre_fc1)
        return fc1

    @staticmethod
    def __conv(dim, name, **kwargs):
        if dim == 1:
            layer = nn.Conv1d(**kwargs)
        elif dim == 2:
            layer = nn.Conv2d(**kwargs)
        elif dim == 3:
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer

    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        if dim == 0 or dim == 1:
            layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2:
            layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3:
            layer = nn.BatchNorm3d(**kwargs)
        else:
            raise NotImplementedError()
        if 'scale' in __weights_dict[name]:
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)
        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer

    @staticmethod
    def __dense(name, **kwargs):
        layer = nn.Linear(**kwargs)
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer
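The converted network above repeats a single motif throughout: a grouped 3x3 "variable-group" convolution that widens the channel count, batch norm, PReLU, then a 1x1 pointwise projection back down, with residual additions (plus0..plus17) joining units. The following is a minimal illustrative sketch of that motif only, not part of the generated file: VarGSepBlock and its argument names are invented for this example, and eps/momentum simply mirror the values passed to the converted layers above.

import torch
import torch.nn as nn

class VarGSepBlock(nn.Module):
    # Grouped 3x3 conv -> BN -> PReLU -> 1x1 pointwise conv -> BN,
    # mirroring the sep1/sep2 sub-blocks generated above.
    def __init__(self, in_ch, mid_ch, out_ch, groups, stride=1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, mid_ch, kernel_size=3, stride=stride,
                                   padding=1, groups=groups, bias=False)
        self.depthwise_bn = nn.BatchNorm2d(mid_ch, eps=2e-5, momentum=0.9)
        self.depthwise_act = nn.PReLU(mid_ch)
        self.pointwise = nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=False)
        self.pointwise_bn = nn.BatchNorm2d(out_ch, eps=2e-5, momentum=0.9)

    def forward(self, x):
        x = self.depthwise_act(self.depthwise_bn(self.depthwise(x)))
        return self.pointwise_bn(self.pointwise(x))

# The stage-4 units above map 320 -> 640 -> 320 channels with 40 groups;
# because the block preserves shape, a residual unit can add its output
# back onto the input, as the plus15..plus17 additions do.
block = VarGSepBlock(in_ch=320, mid_ch=640, out_ch=320, groups=40)
out = block(torch.randn(1, 320, 7, 7))
print(out.shape)  # torch.Size([1, 320, 7, 7])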
168.882692
251
0.852162
14,212
87,819
4.563397
0.010273
0.190224
0.09714
0.124894
0.976393
0.968499
0.962285
0.957228
0.944291
0.918156
0
0.099801
0.079345
87,819
519
252
169.208092
0.702354
0
0
0.035928
0
0
0.137079
0.128903
0
0
0
0
0
1
0.011976
false
0
0.00998
0
0.035928
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e58bd3f16eaafbc287b2f1d6c92f534d489e2eff
6,887
py
Python
QuantumInformation/RecurNum.py
pranay1990/QuantumInformation
6588e623c3c4839e2b484a5ce57bb9aac9bb458c
[ "Unlicense" ]
null
null
null
QuantumInformation/RecurNum.py
pranay1990/QuantumInformation
6588e623c3c4839e2b484a5ce57bb9aac9bb458c
[ "Unlicense" ]
null
null
null
QuantumInformation/RecurNum.py
pranay1990/QuantumInformation
6588e623c3c4839e2b484a5ce57bb9aac9bb458c
[ "Unlicense" ]
null
null
null
""" Created on Mon Jul 15 18:26:57 2019 @authors: Dr. M. S. Ramkarthik and Dr. Pranay Barkataki """ def recur_comb_add(mylist,vec,icount,sum2): lenvec=len(vec) if icount<=lenvec-1: for j in range(icount,lenvec): sum3=sum2+vec[j] sum3=int(sum3) mylist.append(sum3) recur_comb_add(mylist,vec,j+1,sum3) if j==lenvec: return() if icount==lenvec: return() def RecurChainRL1(row,tot_spins,icount,mylist,shift): len_row=len(row) if icount<len_row: for x in range(icount,len_row): row2=row.copy() if x>=0: row2[x]=1 if shift==0: y=0 if shift==1: y=1 sumr=0 if shift==0: for x1 in range(len_row-1,-1,-1): if row2[x1]==0: sumr=sumr+(2**y) if row2[x1]==1: sumr=sumr+(2**(y+1)) y=y+2 mylist.append(sumr) if shift==1: for x1 in range(len_row-1,-1,-1): if row2[x1]==0 and x1==len_row-1: sumr=sumr+(2**(tot_spins-1)) if row2[x1]==1 and x1==len_row-1: sumr=sumr+1 if row2[x1]==0 and x1!=len_row-1: sumr=sumr+(2**y) y=y+2 if row2[x1]==1 and x1!=len_row-1: sumr=sumr+(2**(y+1)) y=y+2 mylist.append(sumr) if x<len_row-1 and x!=-1: RecurChainRL1(row2,tot_spins,x+1,mylist,shift) if x==len_row-1: return() if icount >= len_row: return() def RecurChainRL2(row,tot_spins,icount,mylist,shift): len_row=len(row) if icount<len_row: for x in range(icount,len_row): row2=row.copy() if x>=0: row2[x]=1 if shift==0: y=0 if shift==1: y=1 sumr=0 cntr=0 if shift==0: for x1 in range(len_row-1,-1,-1): if row2[x1]==0: sumr=sumr+(2**y) cntr=cntr+0 if row2[x1]==1: sumr=sumr+(2**(y+1)) cntr=cntr+1 y=y+2 if cntr%2==0: mylist.append(sumr) if cntr%2==1: mylist.append(-sumr) if shift==1: for x1 in range(len_row-1,-1,-1): if row2[x1]==0 and x1==len_row-1: sumr=sumr+(2**(tot_spins-1)) if row2[x1]==1 and x1==len_row-1: sumr=sumr+1 cntr=cntr+1 if row2[x1]==0 and x1!=len_row-1: sumr=sumr+(2**y) y=y+2 if row2[x1]==1 and x1!=len_row-1: sumr=sumr+(2**(y+1)) y=y+2 cntr=cntr+1 if cntr%2==0: mylist.append(sumr) #print(sumr) if cntr%2!=0: mylist.append(-sumr) #print(sumr) if x<len_row-1 and x!=-1: RecurChainRL2(row2,tot_spins,x+1,mylist,shift) if x==len_row-1: return() if icount >= len_row: return() def RecurChainRL3(row,tot_spins,icount,mylist,shift): len_row=len(row) if icount<len_row: for x in range(icount,len_row): row2=row.copy() if x>=0: row2[x]=1 if shift==0: y=0 if shift==1: y=1 sumr=0 if shift==0: for x1 in range(len_row-1,-1,-1): if row2[x1]==0: sumr=sumr+0 if row2[x1]==1: sumr=sumr+(2**y)+(2**(y+1)) y=y+2 mylist.append(sumr) if shift==1: for x1 in range(len_row-1,-1,-1): if row2[x1]==0 and x1==len_row-1: sumr=sumr+0 if row2[x1]==1 and x1==len_row-1: sumr=sumr+1+(2**(tot_spins-1)) if row2[x1]==0 and x1!=len_row-1: sumr=sumr+0 y=y+2 if row2[x1]==1 and x1!=len_row-1: sumr=sumr+(2**(y+1))+(2**y) y=y+2 mylist.append(sumr) if x<len_row-1 and x!=-1: RecurChainRL3(row2,tot_spins,x+1,mylist,shift) if x==len_row-1: return() if icount >= len_row: return() def RecurChainRL4(row,tot_spins,icount,mylist,shift): len_row=len(row) if icount<len_row: for x in range(icount,len_row): row2=row.copy() if x>=0: row2[x]=1 if shift==0: y=0 if shift==1: y=1 sumr=0 cntr=0 if shift==0: for x1 in range(len_row-1,-1,-1): if row2[x1]==0: sumr=sumr+0 cntr=cntr+0 if row2[x1]==1: sumr=sumr+(2**y)+(2**(y+1)) cntr=cntr+1 y=y+2 if cntr%2==0: mylist.append(sumr) if cntr%2==1: mylist.append(-sumr) if shift==1: for x1 in range(len_row-1,-1,-1): if row2[x1]==0 and x1==len_row-1: sumr=sumr+0 if row2[x1]==1 and x1==len_row-1: sumr=sumr+1+(2**(tot_spins-1)) cntr=cntr+1 if row2[x1]==0 and x1!=len_row-1: sumr=sumr+0 y=y+2 if row2[x1]==1 and x1!=len_row-1: sumr=sumr+(2**(y+1))+(2**y) y=y+2 
cntr=cntr+1 if cntr%2==0: mylist.append(sumr) #print(sumr) if cntr%2!=0: mylist.append(-sumr) #print(sumr) if x<len_row-1 and x!=-1: RecurChainRL4(row2,tot_spins,x+1,mylist,shift) if x==len_row-1: return() if icount >= len_row: return()
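For orientation, a short usage sketch of recur_comb_add. The call pattern below is an assumption inferred from the signature rather than an invocation taken from this repository: seed it with an empty list, the full vector, a start index of 0, and a running sum of 0, and it fills the list with every non-empty combination sum.

# Hypothetical usage; assumes RecurNum.py is importable on the current path.
from RecurNum import recur_comb_add

sums = []
recur_comb_add(sums, [1, 2, 4], 0, 0)
print(sorted(sums))  # [1, 2, 3, 4, 5, 6, 7] -- every non-empty subset sum of {1, 2, 4}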
32.952153
62
0.37186
875
6,887
2.853714
0.062857
0.12495
0.089708
0.070485
0.872647
0.855827
0.855827
0.855827
0.855827
0.852623
0
0.090722
0.505445
6,887
208
63
33.110577
0.642396
0.019893
0
0.905263
0
0
0
0
0
0
0
0
0
1
0.026316
false
0
0
0
0.026316
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
e5a43a458defd1bdceda52cfe49a90a95163d69c
44,196
py
Python
django_c3/tests.py
eclipseconsulting/Django-C3
9a8f0042cadb4664ebed536d96d3626ce40da328
[ "BSD-3-Clause" ]
7
2018-04-14T08:26:23.000Z
2022-01-05T09:50:50.000Z
django_c3/tests.py
eclipseconsulting/Django-C3
9a8f0042cadb4664ebed536d96d3626ce40da328
[ "BSD-3-Clause" ]
1
2019-07-23T19:58:55.000Z
2019-07-23T19:58:55.000Z
django_c3/tests.py
eclipseconsulting/Django-C3
9a8f0042cadb4664ebed536d96d3626ce40da328
[ "BSD-3-Clause" ]
2
2018-05-16T17:10:31.000Z
2019-07-11T19:46:54.000Z
from django.test import SimpleTestCase
from django.template import Context, Template


###############################################################################
class DonutChartTest(SimpleTestCase):

    def setUp(self):
        self.chart_data = [
            {'title': 'A', 'value': 6},
            {'title': 'B', 'value': 10, 'color': 'red'},
            {'title': 'C', 'value': 84},
        ]
        self.context = Context({'pie_chart': self.chart_data})
        self.tag_arguments = {
            'bind_element': '#chart',
            'inner_title': 'my_inner_title',
            'outer_title': 'my_outer_title',
            'show_legend': False,
            'height': 130,
            'width': 120
        }
        self.template = (
            '{%% load c3 %%}'
            '{%% donut "%(bind_element)s" pie_chart '
            'inner_title="%(inner_title)s" outer_title="%(outer_title)s" '
            'show_legend=%(show_legend)s height=%(height)s '
            'width=%(width)s %%}'
        )

    def test_bind_element(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element']
        self.assertRegex(rendered_template, bind_to_rgx)

    def test_colors(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'colors: {.*"B"\:.*"red"'
        self.assertRegex(rendered_template, rgx)

    def test_data(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"A\",\s+6\]')
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"B\",\s+10\]')
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"C\",\s+84\]')

    def test_chart_inner_title(self):
        self.tag_arguments['inner_title'] = 'mytitle'
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'donut: { title: "%s"}' % 'mytitle'
        self.assertRegex(rendered_template, rgx)

    def test_chart_outer_title(self):
        self.tag_arguments['outer_title'] = 'mytitle'
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'title: { text: "%s"}' % 'mytitle'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_true(self):
        self.tag_arguments['show_legend'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: true }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_false(self):
        self.tag_arguments['show_legend'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: false }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_height(self):
        self.tag_arguments['height'] = 500
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'size: { height: %s' % self.tag_arguments['height']
        self.assertRegex(rendered_template, rgx)

    def test_chart_width(self):
        self.tag_arguments['width'] = 402
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width']
        self.assertRegex(rendered_template, rgx)


###############################################################################
class PieChartTest(SimpleTestCase):

    def setUp(self):
        self.pie_chart_data = [
            {'title': 'A', 'value': 6},
            {'title': 'B', 'value': 10, 'color': 'red'},
            {'title': 'C', 'value': 84},
        ]
        self.context = Context({'pie_chart': self.pie_chart_data})
        self.tag_arguments = {
            'bind_element': '#chart',
            'title': 'pie-lchart',
            'show_legend': False,
            'height': 130,
            'width': 120
        }
        self.template = (
            '{%% load c3 %%}'
            '{%% pie "%(bind_element)s" pie_chart title="%(title)s" '
            'show_legend=%(show_legend)s height=%(height)s '
            'width=%(width)s %%}'
        )

    def test_bind_element(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element']
        self.assertRegex(rendered_template, bind_to_rgx)

    def test_colors(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'colors: {.*"B"\:.*"red"'
        self.assertRegex(rendered_template, rgx)

    def test_data(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"A\",\s+6\]')
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"B\",\s+10\]')
        self.assertRegex(rendered_template, r'columns\:\s+\[.*\[\"C\",\s+84\]')

    def test_chart_title(self):
        self.tag_arguments['title'] = 'mytitle'
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'title: { text: "%s"}' % 'mytitle'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_true(self):
        self.tag_arguments['show_legend'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: true }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_false(self):
        self.tag_arguments['show_legend'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: false }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_height(self):
        self.tag_arguments['height'] = 500
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'size: { height: %s' % self.tag_arguments['height']
        self.assertRegex(rendered_template, rgx)

    def test_chart_width(self):
        self.tag_arguments['width'] = 402
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width']
        self.assertRegex(rendered_template, rgx)


###############################################################################
class StepChartTest(SimpleTestCase):

    def setUp(self):
        self.step_chart_data = {
            'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
            'horizontal_lines': [40],
            # 'vertical_lines': [40],
            'data': [
                {'title': 'A', 'values': [26, 35, 52, 34, 45, 74],
                 'color': '#FF34FF'},
            ],
            # 'groups': [('A',)]
        }
        self.context = Context({'step_chart': self.step_chart_data})
        self.tag_arguments = {
            'bind_element': '#chart',
            'title': 'step-chart',
            'area': False,
            'x_is_category': True,
            'labels': False,
            'vertical_grid_line': False,
            'horizontal_grid_line': False,
            'zoom': False,
            'show_legend': False,
            'group_tooltip': True,
            'height': 130,
            'width': 120
        }
        self.template = (
            '{%% load c3 %%}'
            '{%% step "%(bind_element)s" step_chart title="%(title)s" '
            'area=%(area)s x_is_category=%(x_is_category)s '
            'labels=%(labels)s vertical_grid_line=%(vertical_grid_line)s '
            'horizontal_grid_line=%(horizontal_grid_line)s '
            'show_legend=%(show_legend)s zoom=%(zoom)s '
            'group_tooltip=%(group_tooltip)s '
            'height=%(height)s width=%(width)s %%}'
        )

    def test_bind_element(self):
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element']
        self.assertRegex(rendered_template, bind_to_rgx)

    def test_chart_title(self):
        self.tag_arguments['title'] = 'mytitle'
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'title: { text: "%s"}' % 'mytitle'
        self.assertRegex(rendered_template, rgx)

    def test_chart_area_true(self):
        self.tag_arguments['area'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'type : "area-step"'
        self.assertRegex(rendered_template, rgx)

    def test_chart_area_false(self):
        self.tag_arguments['area'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'type : "step"'
        self.assertRegex(rendered_template, rgx)

    def test_chart_labels_true(self):
        self.tag_arguments['labels'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'labels : true'
        self.assertRegex(rendered_template, rgx)

    def test_chart_labels_false(self):
        self.tag_arguments['labels'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'labels : false'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_true(self):
        self.tag_arguments['show_legend'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: true }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_legend_false(self):
        self.tag_arguments['show_legend'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'legend: { show: false }'
        self.assertRegex(rendered_template, rgx)

    def test_chart_vertical_grid_line_true(self):
        self.tag_arguments['vertical_grid_line'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'x: { show: true ,lines'
        self.assertRegex(rendered_template, rgx)

    def test_chart_vertical_grid_line_false(self):
        self.tag_arguments['vertical_grid_line'] = False
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'x: { show: false ,lines'
        self.assertRegex(rendered_template, rgx)

    def test_chart_horizontal_grid_line_true(self):
        self.tag_arguments['horizontal_grid_line'] = True
        self.template_to_render = Template(self.template % self.tag_arguments)
        rendered_template = self.template_to_render.render(self.context)
        rgx = r'y: { show: true ,lines'
self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_false(self): self.tag_arguments['horizontal_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: false },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_true(self): self.tag_arguments['zoom'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: true },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_false(self): self.tag_arguments['zoom'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_true(self): self.tag_arguments['group_tooltip'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: true },' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_false(self): self.tag_arguments['group_tooltip'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: false },' self.assertRegex(rendered_template, rgx) def test_chart_height(self): self.tag_arguments['height'] = 500 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: %s' % self.tag_arguments['height'] self.assertRegex(rendered_template, rgx) def test_chart_width(self): self.tag_arguments['width'] = 402 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width'] self.assertRegex(rendered_template, rgx) def test_colors(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'"A": "#FF34FF",' self.assertRegex(rendered_template, rgx) def test_x_is_category(self): self.tag_arguments['x_is_category'] = False self.template_to_render = Template(self.template % self.tag_arguments) self.assertRaises( ValueError, self.template_to_render.render, self.context) def test_data(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) self.assertTrue('["A", 26,35,52,34,45,74]' in rendered_template) self.assertRegex( rendered_template, r'x: "2d2014226823e74c2accfcce8e0ca141",') columns = ( '["2d2014226823e74c2accfcce8e0ca141", ' '\'2017-5-19\',\'2017-5-20\',\'2017-5-21\',\'2017-5-22\']' ) self.assertTrue(columns in rendered_template) ############################################################################### class LineXYChartTest(SimpleTestCase): def setUp(self): self.line_xy_chart_data = { 'horizontal_lines': [40], 'data': [ {'title': 'A', 'values': [(i, i*2) for i in range(3)], "color": "blue"}, ], # 'groups': [('A', 'B')] } self.context = Context({'line_xy_chart': self.line_xy_chart_data}) self.tag_arguments = { 'bind_element': '#chart', 'title': 'step-chart', 'area': False, 'labels': False, 'vertical_grid_line': False, 'horizontal_grid_line': 
False, 'zoom': False, 'show_legend': False, 'group_tooltip': True, 'height': 130, 'width': 120, 'show_points': True, 'angle': True, } self.template = ( '{%% load c3 %%}' '{%% line_xy "%(bind_element)s" ' 'line_xy_chart title="%(title)s" ' 'area=%(area)s ' 'labels=%(labels)s vertical_grid_line=%(vertical_grid_line)s ' 'horizontal_grid_line=%(horizontal_grid_line)s ' 'show_legend=%(show_legend)s zoom=%(zoom)s ' 'group_tooltip=%(group_tooltip)s ' 'height=%(height)s width=%(width)s ' 'show_points=%(show_points)s angle=%(angle)s %%}' ) def test_bind_element(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element'] self.assertRegex(rendered_template, bind_to_rgx) def test_chart_title(self): self.tag_arguments['title'] = 'mytitle' self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'title: { text: "%s"}' % 'mytitle' self.assertRegex(rendered_template, rgx) def test_chart_area_true(self): self.tag_arguments['area'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "area"' self.assertRegex(rendered_template, rgx) def test_chart_area_false(self): self.tag_arguments['area'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "line"' self.assertRegex(rendered_template, rgx) def test_chart_labels_true(self): self.tag_arguments['labels'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : true' self.assertRegex(rendered_template, rgx) def test_chart_labels_false(self): self.tag_arguments['labels'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : false' self.assertRegex(rendered_template, rgx) def test_chart_legend_true(self): self.tag_arguments['show_legend'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: true }' self.assertRegex(rendered_template, rgx) def test_chart_legend_false(self): self.tag_arguments['show_legend'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: false }' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_true(self): self.tag_arguments['vertical_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_false(self): self.tag_arguments['vertical_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_true(self): self.tag_arguments['horizontal_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = 
self.template_to_render.render(self.context) rgx = r'y: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_false(self): self.tag_arguments['horizontal_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: false },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_true(self): self.tag_arguments['zoom'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: true },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_false(self): self.tag_arguments['zoom'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_true(self): self.tag_arguments['group_tooltip'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: true },' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_false(self): self.tag_arguments['group_tooltip'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: false },' self.assertRegex(rendered_template, rgx) def test_chart_show_points_true(self): self.tag_arguments['show_points'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'point: { show: true },' self.assertRegex(rendered_template, rgx) def test_chart_show_points_false(self): self.tag_arguments['show_points'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'point: { show: false },' self.assertRegex(rendered_template, rgx) def test_chart_show_angle_true(self): self.tag_arguments['angle'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "line"' self.assertRegex(rendered_template, rgx) def test_chart_angle_false(self): self.tag_arguments['angle'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "spline"' self.assertRegex(rendered_template, rgx) def test_chart_height(self): self.tag_arguments['height'] = 500 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: %s' % self.tag_arguments['height'] self.assertRegex(rendered_template, rgx) def test_chart_width(self): self.tag_arguments['width'] = 402 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width'] self.assertRegex(rendered_template, rgx) def test_colors(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'colors: { "A": "blue", }' self.assertRegex(rendered_template, rgx) 
def test_data(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) self.assertTrue( 'columns: [ ["A", 0,2,4], ["A_x", 0,1,2], ]' in rendered_template) ############################################################################### class BarChartTest(SimpleTestCase): def setUp(self): self.bar_chart_data = { 'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'], 'horizontal_lines': [40], 'data': [ {'title': 'A', 'values': [26, 5, 52, 74]}, {'title': 'B', 'values': [54, 21, 40, 26], 'color': 'red'}, {'title': 'C', 'values': [63, 14, 25, 11]}, ], # 'groups': [('B', 'C')] } self.context = Context({'bar_chart': self.bar_chart_data}) self.tag_arguments = { 'bind_element': '#chart', 'title': 'step-chart', 'x_is_category': True, 'labels': False, 'vertical_grid_line': False, 'horizontal_grid_line': False, 'zoom': False, 'show_legend': False, 'group_tooltip': True, 'height': 130, 'width': 120, 'column_width': 35 } self.template = ( '{%% load c3 %%}' '{%% bar "%(bind_element)s" bar_chart title="%(title)s" ' 'x_is_category=%(x_is_category)s ' 'labels=%(labels)s vertical_grid_line=%(vertical_grid_line)s ' 'horizontal_grid_line=%(horizontal_grid_line)s ' 'show_legend=%(show_legend)s zoom=%(zoom)s ' 'group_tooltip=%(group_tooltip)s ' 'height=%(height)s width=%(width)s ' 'column_width=%(column_width)s %%}' ) def test_bind_element(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element'] self.assertRegex(rendered_template, bind_to_rgx) def test_chart_title(self): self.tag_arguments['title'] = 'mytitle' self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'title: { text: "%s"}' % 'mytitle' self.assertRegex(rendered_template, rgx) def test_chart_labels_true(self): self.tag_arguments['labels'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : true' self.assertRegex(rendered_template, rgx) def test_chart_labels_false(self): self.tag_arguments['labels'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : false' self.assertRegex(rendered_template, rgx) def test_chart_legend_true(self): self.tag_arguments['show_legend'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: true }' self.assertRegex(rendered_template, rgx) def test_chart_legend_false(self): self.tag_arguments['show_legend'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: false }' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_true(self): self.tag_arguments['vertical_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_false(self): self.tag_arguments['vertical_grid_line'] = False self.template_to_render = Template(self.template % 
self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_true(self): self.tag_arguments['horizontal_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_false(self): self.tag_arguments['horizontal_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: false },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_true(self): self.tag_arguments['zoom'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: true },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_false(self): self.tag_arguments['zoom'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_true(self): self.tag_arguments['group_tooltip'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: true },' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_false(self): self.tag_arguments['group_tooltip'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: false },' self.assertRegex(rendered_template, rgx) def test_chart_height(self): self.tag_arguments['height'] = 500 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: %s' % self.tag_arguments['height'] self.assertRegex(rendered_template, rgx) def test_chart_width(self): self.tag_arguments['width'] = 402 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width'] self.assertRegex(rendered_template, rgx) def test_chart_column_width(self): self.tag_arguments['column_width'] = 35 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'bar: { width: %s },' % self.tag_arguments['column_width'] self.assertRegex(rendered_template, rgx) def test_colors(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'colors: { "B": "red", }' self.assertRegex(rendered_template, rgx) def test_x_is_category(self): self.tag_arguments['x_is_category'] = False self.template_to_render = Template(self.template % self.tag_arguments) self.assertRaises( ValueError, self.template_to_render.render, self.context) def test_data(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) self.assertTrue('["A", 26,5,52,74]' in rendered_template) self.assertTrue('["B", 
54,21,40,26]' in rendered_template) self.assertTrue('["C", 63,14,25,11]' in rendered_template) self.assertRegex( rendered_template, r'x: "2d2014226823e74c2accfcce8e0ca141",') columns = ( '["2d2014226823e74c2accfcce8e0ca141", ' '\'2017-5-19\',\'2017-5-20\',\'2017-5-21\',\'2017-5-22\']' ) self.assertTrue(columns in rendered_template) ############################################################################### class LineChartTest(SimpleTestCase): def setUp(self): self.line_chart_data = { 'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'], 'horizontal_lines': [40], 'data': [ {'title': 'A', 'values': [26, 35, 52, 34, 45, 74]}, {'title': 'B', 'values': [54, 25, 52, 26, 20, 89], 'color': 'red'}, ], # 'groups': [('A', 'B')] } self.context = Context({'line_chart': self.line_chart_data}) self.tag_arguments = { 'bind_element': '#chart', 'title': 'step-chart', 'x_is_category': True, 'area': False, 'labels': False, 'vertical_grid_line': False, 'horizontal_grid_line': False, 'zoom': False, 'show_legend': False, 'group_tooltip': True, 'height': 130, 'width': 120, 'show_points': True, 'angle': True, } self.template = ( '{%% load c3 %%}' '{%% line "%(bind_element)s" ' 'line_chart title="%(title)s" ' 'area=%(area)s x_is_category=%(x_is_category)s ' 'labels=%(labels)s vertical_grid_line=%(vertical_grid_line)s ' 'horizontal_grid_line=%(horizontal_grid_line)s ' 'show_legend=%(show_legend)s zoom=%(zoom)s ' 'group_tooltip=%(group_tooltip)s ' 'height=%(height)s width=%(width)s ' 'show_points=%(show_points)s angle=%(angle)s %%}' ) def test_bind_element(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) bind_to_rgx = r'bindto:\s+"%s"' % self.tag_arguments['bind_element'] self.assertRegex(rendered_template, bind_to_rgx) def test_chart_title(self): self.tag_arguments['title'] = 'mytitle' self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'title: { text: "%s"}' % 'mytitle' self.assertRegex(rendered_template, rgx) def test_chart_area_true(self): self.tag_arguments['area'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "area"' self.assertRegex(rendered_template, rgx) def test_chart_area_false(self): self.tag_arguments['area'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "line"' self.assertRegex(rendered_template, rgx) def test_chart_labels_true(self): self.tag_arguments['labels'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : true' self.assertRegex(rendered_template, rgx) def test_chart_labels_false(self): self.tag_arguments['labels'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'labels : false' self.assertRegex(rendered_template, rgx) def test_chart_legend_true(self): self.tag_arguments['show_legend'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: true }' self.assertRegex(rendered_template, rgx) def test_chart_legend_false(self): self.tag_arguments['show_legend'] = False 
self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'legend: { show: false }' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_true(self): self.tag_arguments['vertical_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_vertical_grid_line_false(self): self.tag_arguments['vertical_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'x: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_true(self): self.tag_arguments['horizontal_grid_line'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: true ,lines' self.assertRegex(rendered_template, rgx) def test_chart_horizontal_grid_line_false(self): self.tag_arguments['horizontal_grid_line'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: false },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_true(self): self.tag_arguments['zoom'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'zoom: { enabled: true },' self.assertRegex(rendered_template, rgx) def test_chart_zoom_false(self): self.tag_arguments['zoom'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'y: { show: false ,lines' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_true(self): self.tag_arguments['group_tooltip'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: true },' self.assertRegex(rendered_template, rgx) def test_chart_group_tooltip_false(self): self.tag_arguments['group_tooltip'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'tooltip: { grouped: false },' self.assertRegex(rendered_template, rgx) def test_chart_show_points_true(self): self.tag_arguments['show_points'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'point: { show: true },' self.assertRegex(rendered_template, rgx) def test_chart_show_points_false(self): self.tag_arguments['show_points'] = False self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'point: { show: false },' self.assertRegex(rendered_template, rgx) def test_chart_show_angle_true(self): self.tag_arguments['angle'] = True self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "line"' self.assertRegex(rendered_template, rgx) def test_chart_angle_false(self): self.tag_arguments['angle'] = False self.template_to_render = 
Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'type : "spline"' self.assertRegex(rendered_template, rgx) def test_chart_height(self): self.tag_arguments['height'] = 500 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: %s' % self.tag_arguments['height'] self.assertRegex(rendered_template, rgx) def test_chart_width(self): self.tag_arguments['width'] = 402 self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'size: { height: 130, width: %s }' % self.tag_arguments['width'] self.assertRegex(rendered_template, rgx) def test_colors(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) rgx = r'colors: { "B": "red", }' self.assertRegex(rendered_template, rgx) def test_data(self): self.template_to_render = Template(self.template % self.tag_arguments) rendered_template = self.template_to_render.render(self.context) self.assertTrue('["A", 26,35,52,34,45,74]' in rendered_template) self.assertTrue('["B", 54,25,52,26,20,89]' in rendered_template) self.assertTrue( ('["2d2014226823e74c2accfcce8e0ca141",' '\'2017-5-19\',\'2017-5-20\',\'2017-5-21\',\'2017-5-22\']') in rendered_template)
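For orientation, every test above follows the same render-and-assert pattern. A minimal standalone sketch of that pattern is below; it assumes the app providing the `c3` tag library (loaded by `{% load c3 %}`) is installed, and "django_c3" is a placeholder app name, not confirmed by this file.

# Minimal sketch of the render-and-assert pattern used by the tests above.
# Assumption: "django_c3" is a placeholder for whatever app ships the `c3`
# template tag library; it is not named in this file.
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["django_c3"],  # placeholder app name (assumption)
    TEMPLATES=[{
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
    }],
)
django.setup()

from django.template import Context, Template

template = Template(
    '{% load c3 %}'
    '{% pie "#chart" pie_chart title="demo" show_legend=True '
    'height=200 width=200 %}'
)
html = template.render(Context({
    'pie_chart': [{'title': 'A', 'value': 6}, {'title': 'B', 'value': 10}],
}))
assert 'bindto' in html  # the generated C3.js config binds the chart element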
44.778116
79
0.642117
5,259
44,196
5.121316
0.023769
0.144358
0.130101
0.157428
0.973972
0.958601
0.955259
0.950915
0.946683
0.944678
0
0.015344
0.228754
44,196
986
80
44.823529
0.774805
0.002512
0
0.896635
0
0.001202
0.151886
0.028116
0
0
0
0
0.141827
1
0.134615
false
0
0.002404
0
0.144231
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e5eb82f32b9f10ea4466b87befd45af6e9400d3d
4,554
py
Python
libs/loss.py
james20141606/membrane_prediction
aed9627145e612f80339a1521c305a746f707fe6
[ "MIT" ]
null
null
null
libs/loss.py
james20141606/membrane_prediction
aed9627145e612f80339a1521c305a746f707fe6
[ "MIT" ]
null
null
null
libs/loss.py
james20141606/membrane_prediction
aed9627145e612f80339a1521c305a746f707fe6
[ "MIT" ]
null
null
null
from __future__ import print_function, division

from torch.nn.modules.loss import _assert_no_grad, _Loss
import torch.nn.functional as F
import torch


# define a customized loss function for future development
class WeightedBCELoss(_Loss):
    def __init__(self, size_average=True, reduce=True):
        super(WeightedBCELoss, self).__init__(size_average, reduce)

    def forward(self, input, target, weight):
        _assert_no_grad(target)
        return F.binary_cross_entropy(input, target, weight,
                                      self.size_average, self.reduce)


# Weighted binary cross entropy + Dice loss
class BCLoss(_Loss):
    def __init__(self, size_average=True, reduce=True):
        super(BCLoss, self).__init__(size_average, reduce)

    def dice_loss(self, input, target):
        smooth = 1.
        loss = 0.
        for index in range(input.size()[0]):
            iflat = input[index].view(-1)
            tflat = target[index].view(-1)
            intersection = (iflat * tflat).sum()
            loss += 1 - ((2. * intersection + smooth) /
                         (iflat.sum() + tflat.sum() + smooth))
        # size_average=True for the dice loss
        return loss / float(input.size()[0])

    def forward(self, input, target, weight):
        """Weighted binary classification loss + Dice coefficient loss."""
        _assert_no_grad(target)
        loss1 = F.binary_cross_entropy(input, target, weight,
                                       self.size_average, self.reduce)
        loss2 = self.dice_loss(input, target)
        return loss1, loss2


# Focal Loss
class FocalLoss(_Loss):
    def __init__(self, size_average=True, reduce=True):
        super().__init__(size_average, reduce)

    def focal_loss(self, input, target, weight):
        gamma = 2
        eps = 1e-7
        loss = 0.
        for index in range(input.size()[0]):
            iflat = input[index].view(-1)
            tflat = target[index].view(-1)
            wflat = weight[index].view(-1)
            iflat = iflat.clamp(eps, 1.0 - eps)
            fc_loss_pos = -1 * tflat * torch.log(iflat) * (1 - iflat) ** gamma
            fc_loss_neg = -1 * (1 - tflat) * torch.log(1 - iflat) * (iflat) ** gamma
            fc_loss = fc_loss_pos + fc_loss_neg
            fc_loss = fc_loss * wflat  # weighted focal loss
            loss += fc_loss.mean()
        return loss / float(input.size()[0])

    def forward(self, input, target, weight):
        """Weighted Focal Loss."""
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))
        loss = self.focal_loss(input, target, weight)
        return loss


# Focal Loss + Dice Loss
class BCLoss_focal(_Loss):
    def __init__(self, size_average=True, reduce=True):
        super().__init__(size_average, reduce)

    def dice_loss(self, input, target):
        smooth = 1.
        loss = 0.
        for index in range(input.size()[0]):
            iflat = input[index].view(-1)
            tflat = target[index].view(-1)
            intersection = (iflat * tflat).sum()
            loss += 1 - ((2. * intersection + smooth) /
                         (iflat.sum() + tflat.sum() + smooth))
        # size_average=True for the dice loss
        return loss / float(input.size()[0])

    def focal_loss(self, input, target, weight):
        gamma = 2
        eps = 1e-7
        loss = 0.
        for index in range(input.size()[0]):
            iflat = input[index].view(-1)
            tflat = target[index].view(-1)
            wflat = weight[index].view(-1)
            iflat = iflat.clamp(eps, 1.0 - eps)
            fc_loss_pos = -1 * tflat * torch.log(iflat) * (1 - iflat) ** gamma
            fc_loss_neg = -1 * (1 - tflat) * torch.log(1 - iflat) * (iflat) ** gamma
            fc_loss = fc_loss_pos + fc_loss_neg
            fc_loss = fc_loss * wflat  # weighted focal loss
            loss += fc_loss.mean()
        return loss / float(input.size()[0])

    def forward(self, input, target, weight):
        """Weighted binary classification loss + Dice coefficient loss."""
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()))
        loss1 = self.focal_loss(input, target, weight)
        loss2 = self.dice_loss(input, target)
        return loss1, loss2
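A short usage sketch for the losses above; the tensor shapes are illustrative only, and it assumes a PyTorch version old enough to still expose torch.nn.modules.loss._assert_no_grad, as the imports in this file do.

# Usage sketch (shapes are illustrative, not from the source).
import torch

pred = torch.rand(2, 1, 8, 8, requires_grad=True)  # probabilities in [0, 1]
target = (torch.rand(2, 1, 8, 8) > 0.5).float()    # binary membrane mask
weight = torch.ones_like(target)                   # per-voxel weights

criterion = BCLoss()
bce, dice = criterion(pred, target, weight)
(bce + dice).backward()                            # gradients flow to `pred`
print(bce.item(), dice.item())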
34.5
120
0.577075
566
4,554
4.45583
0.14311
0.038065
0.067407
0.04996
0.8636
0.8636
0.828311
0.828311
0.828311
0.793418
0
0.017533
0.298639
4,554
132
121
34.5
0.772073
0.053579
0
0.827586
0
0
0.025409
0
0
0
0
0
0.057471
1
0.137931
false
0
0.045977
0
0.321839
0.011494
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e5fe74a345140265f1cd7b5e289f7f58717a83a1
12
py
Python
_draft/answers/x_9_6.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
null
null
null
_draft/answers/x_9_6.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
1
2021-11-13T08:03:04.000Z
2021-11-13T08:03:04.000Z
_draft/answers/x_9_6.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
null
null
null
# x_9_6
#
#
3
7
0.416667
3
12
1
1
0
0
0
0
0
0
0
0
0
0
0.25
0.333333
12
3
8
4
0.125
0.416667
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
8
f91b65408b4d4cc541329de17ba3022dbd686633
3,663
py
Python
plotting.py
shutingli/StarLWebInterface
c65530ece7b1ee6b0086c0788b9d7710e36098af
[ "BSD-3-Clause" ]
null
null
null
plotting.py
shutingli/StarLWebInterface
c65530ece7b1ee6b0086c0788b9d7710e36098af
[ "BSD-3-Clause" ]
null
null
null
plotting.py
shutingli/StarLWebInterface
c65530ece7b1ee6b0086c0788b9d7710e36098af
[ "BSD-3-Clause" ]
null
null
null
import os
from sys import argv


def writePlot(filename, robotnum, robotID, mode):
    """Emit a gnuplot script (<filename>.gy) plotting robot trace files.

    mode 1: x vs t (columns 1:2), mode 2: y vs t (columns 1:3),
    mode 3: y vs x (columns 2:3). robotID is a bitmask selecting robots.
    """
    robotnum = int(robotnum)
    robotID = int(robotID)
    mode = int(mode)

    # per-mode gnuplot settings: (xlabel, ylabel, title suffix, using columns)
    modes = {
        1: ("t (ms)", "X (mm)", "x vs t", "1:2"),
        2: ("t (ms)", "Y (mm)", "y vs t", "1:3"),
        3: ("Y (mm)", "X (mm)", "y vs x", "2:3"),
    }
    xlabel, ylabel, suffix, using = modes[mode]

    with open(filename + ".gy", "w") as writeFile:
        writeFile.write("set term png\n")
        writeFile.write("set output 'output.png'\n")
        writeFile.write("set style line 1 lc rgb '#0060ad' lt 1 lw 2 pt 2 ps 1.5 # --- blue\n")
        writeFile.write("set style line 2 lc rgb '#dd181f' lt 1 lw 2 pt 2 ps 1.5 # --- red\n")
        writeFile.write("set style line 3 lc rgb '#00ff80' lt 1 lw 2 pt 2 ps 1.5 # --- green\n")
        writeFile.write("set style line 4 lc rgb '#80ff00' lt 1 lw 2 pt 2 ps 1.5 # --- green\n")
        writeFile.write("set xlabel '%s'\n" % xlabel)
        writeFile.write("set ylabel '%s'\n" % ylabel)
        writeFile.write("set title ' %s %s '\n" % (filename, suffix))

        if robotnum == 1:
            # single robot: plot its .dat file directly; style index == robotID
            writeFile.write("plot '%s%d.dat' using %s title 'robot%d' with l ls %d\n"
                            % (filename, robotID, using, robotID, robotID))
            return

        # multiple robots: robotID is a bitmask; walk its set bits to recover
        # the individual robot IDs, emitting one plot clause per robot
        tmp = robotID
        newID = 1
        for i in range(robotnum):
            while tmp % 2 == 0:  # skip unset bits
                tmp //= 2
                newID += 1
            clause = ("'%s%d.dat' using %s title 'robot%d' with l ls %d"
                      % (filename, newID, using, newID, i + 1))
            if i == 0:
                writeFile.write("plot " + clause + ", \\\n")
            elif i == robotnum - 1:
                writeFile.write(" " + clause + " \n")
            else:
                writeFile.write(" " + clause + ", \\\n")
            tmp //= 2
            newID += 1
    print("success")


writePlot(argv[1], argv[2], argv[3], argv[4])
os.system("gnuplot " + argv[1] + ".gy")
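A hypothetical invocation sketch (the file names and bitmask value are made up for illustration): the script expects one <name><id>.dat trace file per selected robot, writes <name>.gy, and gnuplot then renders output.png.

# Hypothetical example: robotID 5 == 0b101 selects robots 1 and 3, so the
# script reads trace1.dat and trace3.dat (names are illustrative only).
writePlot("trace", 2, 5, 1)    # mode 1: x vs t, gnuplot columns 1:2
os.system("gnuplot trace.gy")  # renders output.png from trace.gy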
40.7
130
0.595141
591
3,663
3.688663
0.128596
0.211927
0.116972
0.055046
0.849083
0.838073
0.788991
0.788073
0.788073
0.759174
0
0.037351
0.196014
3,663
89
131
41.157303
0.702886
0
0
0.602273
0
0.045455
0.311493
0
0
0
0
0
0
0
null
null
0
0.022727
null
null
0.011364
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
00c4b307b8474d9233ea6c44d4101e02f9278c76
203
py
Python
sfaira/data/dataloaders/databases/cellxgene/__init__.py
johnmous/sfaira
c50240a74530e614ab7681bf9c63b04cb815b361
[ "BSD-3-Clause" ]
110
2020-09-08T07:47:15.000Z
2022-03-29T03:33:56.000Z
sfaira/data/dataloaders/databases/cellxgene/__init__.py
johnmous/sfaira
c50240a74530e614ab7681bf9c63b04cb815b361
[ "BSD-3-Clause" ]
405
2020-09-15T15:05:46.000Z
2022-03-16T14:44:23.000Z
sfaira/data/dataloaders/databases/cellxgene/__init__.py
johnmous/sfaira
c50240a74530e614ab7681bf9c63b04cb815b361
[ "BSD-3-Clause" ]
20
2021-03-30T15:30:14.000Z
2022-03-07T12:52:58.000Z
from sfaira.data.dataloaders.databases.cellxgene.cellxgene_group import DatasetSuperGroupCellxgene, DatasetGroupCellxgene
from sfaira.data.dataloaders.databases.cellxgene.cellxgene_loader import Dataset
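These re-exports let downstream code import the loaders from the package itself rather than from its private modules; a one-line consumer sketch (module paths taken from the record above, requires sfaira installed):

# Consumer-side import enabled by the re-exports above (requires sfaira):
from sfaira.data.dataloaders.databases.cellxgene import (
    Dataset, DatasetGroupCellxgene, DatasetSuperGroupCellxgene)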
67.666667
121
0.901478
21
203
8.619048
0.571429
0.110497
0.154696
0.276243
0.574586
0.574586
0.574586
0
0
0
0
0
0.044335
203
2
122
101.5
0.93299
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
00d98ba5128787589ffde6e456183c3d87761aaa
14,478
py
Python
pyvisdk/mo/storage_resource_manager.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
pyvisdk/mo/storage_resource_manager.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
pyvisdk/mo/storage_resource_manager.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging

########################################
# Automatically generated, do not edit.
########################################

log = logging.getLogger(__name__)


class StorageResourceManager(BaseEntity):
    '''This managed object type provides a way to configure resource usage for
    storage resources.'''

    def __init__(self, core, name=None, ref=None,
                 type=ManagedObjectTypes.StorageResourceManager):
        super(StorageResourceManager, self).__init__(core, name=name, ref=ref,
                                                     type=type)

    def ApplyStorageDrsRecommendation_Task(self, key):
        '''Applies a recommendation from the recommendation list. Each
        recommendation can be applied only once. In the case of CreateVm and
        CloneVm a VirtualMachine is returned. Other workflows don't have a
        return value.

        :param key: The key fields of the Recommendations that are applied.
        '''
        return self.delegate("ApplyStorageDrsRecommendation_Task")(key)

    def ApplyStorageDrsRecommendationToPod_Task(self, pod, key):
        '''Applies a recommendation from the recommendation list. Each
        recommendation can be applied only once.

        :param pod: The storage pod.
        :param key: The key field of the Recommendation.
        '''
        return self.delegate("ApplyStorageDrsRecommendationToPod_Task")(pod, key)

    def CancelStorageDrsRecommendation(self, key):
        '''Cancels a recommendation. Currently only initial placement
        recommendations can be cancelled. Migration recommendations cannot.

        :param key: The key field of the Recommendation.
        '''
        return self.delegate("CancelStorageDrsRecommendation")(key)

    def ConfigureDatastoreIORM_Task(self, datastore, spec):
        '''Changes configuration of storage I/O resource management for a given
        datastore. The changes are applied to all the backing storage devices
        for the datastore. Currently we only support storage I/O resource
        management on VMFS volumes. In order to enable storage I/O resource
        management on a datastore, we require that all the hosts that are
        attached to the datastore support this feature.

        :param datastore: The datastore to be configured.
        :param spec: The configuration spec.
        '''
        return self.delegate("ConfigureDatastoreIORM_Task")(datastore, spec)

    def ConfigureStorageDrsForPod_Task(self, pod, spec, modify):
        '''Change the storage DRS configuration for a pod StoragePod.

        :param pod: The storage pod.
        :param spec: A set of storage Drs configuration changes to apply to the
            storage pod. The specification can be a complete set of changes or
            a partial set of changes, applied incrementally.
        :param modify: Flag to specify whether the specification ("spec")
            should be applied incrementally. If "modify" is false and the
            operation succeeds, then the configuration of the storage pod
            matches the specification exactly; in this case any unset portions
            of the specification will result in unset or default portions of
            the configuration.
        '''
        return self.delegate("ConfigureStorageDrsForPod_Task")(pod, spec, modify)

    def QueryIORMConfigOption(self, host):
        '''Query configuration options for storage I/O resource management.

        :param host: [in] - The host VC will forward the query to. This
            parameter is ignored by host if this method is called on a host
            directly.
        '''
        return self.delegate("QueryIORMConfigOption")(host)

    def RecommendDatastores(self, storageSpec):
        '''This method returns a StoragePlacementResult object. This API is
        intended to replace the following existing APIs for SDRS-enabled pods:
        CreateVm: StoragePlacementSpec::type == create = CreateVM_Task;
        AddDisk: StoragePlacementSpec::type == reconfigure = ReconfigVM_Task;
        RelocateVm: StoragePlacementSpec::type == relocate = RelocateVM_Task;
        CloneVm: StoragePlacementSpec::type == clone = CloneVM_Task.

        The PodSelectionSpec parameter in StoragePlacementSpec is required for
        all workflows. It specifies which SDRS-enabled pod the user has
        selected for the VM and/or for each disk. For CreateVm, RelocateVm and
        CloneVm, PodSelectionSpec.storagePod is the user selected SDRS pod for
        the VM, i.e., its system files. For all workflows,
        PodSelectionSpec.disk.storagePod is the user selected SDRS pod for the
        given disk. Note that a DiskLocator must be specified for each disk
        that the user requests to create, migrate or clone into an SDRS pod,
        even if it's the same pod as the VM or the user has manually selected
        a datastore within the pod.

        If the user has manually selected a datastore, the datastore must be
        specified in the workflow specific fields as described below. For
        CreateVm and AddDisk, the manually selected datastore must be
        specified in ConfigSpec.files or
        ConfigSpec.deviceChange.device.backing.datastore; the fields will be
        unset if the user wants SDRS to recommend the datastore. For
        RelocateVm, the manually selected datastore must be specified in
        RelocateSpec.datastore or RelocateSpec.disk.datastore; the fields
        should be unset iff the user wants SDRS recommendations. For CloneVm,
        the manually selected datastore must be specified in
        CloneSpec.location.datastore or CloneSpec.location.disk[].datastore;
        the fields should be unset iff the user wants SDRS recommendations.

        The remaining expected input parameters in StoragePlacementSpec will
        be the same as those for the existing API as determined by
        StoragePlacementSpec::type. If a parameter is optional in the existing
        API, it will also be optional in the new API.

        :param storageSpec:
        '''
        return self.delegate("RecommendDatastores")(storageSpec)

    def RefreshStorageDrsRecommendation(self, pod):
        '''Make Storage DRS invoke again on the specified pod StoragePod and
        return a new list of recommendations. Concurrent "refresh" requests
        may be combined together and trigger only one Storage DRS invocation.

        :param pod: The storage pod. The recommendations generated is stored
            at PodStorageDrsEntry#recommendation.
        '''
        return self.delegate("RefreshStorageDrsRecommendation")(pod)
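Every method above is a thin wrapper around BaseEntity.delegate, which resolves a named server-side operation and forwards the call's arguments. A self-contained toy sketch of that delegation pattern (the class and operation table here are illustrative, not the pyvisdk internals):

# Toy model of the delegate() pattern used by the generated class above:
# look up a named operation and forward the caller's arguments to it.
class ToyEntity:
    def __init__(self, operations):
        self._operations = operations  # name -> callable

    def delegate(self, name):
        return self._operations[name]

    def CancelStorageDrsRecommendation(self, key):
        return self.delegate("CancelStorageDrsRecommendation")(key)

entity = ToyEntity({"CancelStorageDrsRecommendation":
                    lambda key: "cancelled %s" % key})
print(entity.CancelStorageDrsRecommendation("42"))  # -> cancelled 42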
64.633929
348
0.723857
1,864
14,478
5.601395
0.127682
0.024136
0.028733
0.036778
0.786515
0.781917
0.777416
0.777416
0.777416
0.777416
0
0
0.229383
14,478
224
349
64.633929
0.935825
0.789819
0
0
1
0
0.149515
0.137217
0
0
0
0
0
1
0.391304
false
0
0.130435
0
0.913043
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
8
970fb8804146ef0abbb40f00a0ed87eef6d6abc0
12,851
py
Python
tests/unit_tests/test_vmedia.py
ecoen66/imcsdk
b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab
[ "Apache-2.0" ]
31
2016-06-14T07:23:59.000Z
2021-09-12T17:17:26.000Z
tests/unit_tests/test_vmedia.py
sthagen/imcsdk
1831eaecb5960ca03a8624b1579521749762b932
[ "Apache-2.0" ]
109
2016-05-25T03:56:56.000Z
2021-10-18T02:58:12.000Z
tests/unit_tests/test_vmedia.py
sthagen/imcsdk
1831eaecb5960ca03a8624b1579521749762b932
[ "Apache-2.0" ]
67
2016-05-17T05:53:56.000Z
2022-03-24T15:52:53.000Z
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mock import patch, call, MagicMock
from nose.tools import assert_raises

from imcsdk.imchandle import ImcHandle
from imcsdk.imcexception import ImcOperationError
from imcsdk.mometa.comm.CommVMediaMap import CommVMediaMap
from imcsdk.apis.server.vmedia import vmedia_mount_iso_uri, \
    vmedia_mount_remove_all, \
    vmedia_get_existing_uri, \
    vmedia_get_existing_status
from imcsdk.imccoreutils import IMC_PLATFORM


@patch.object(ImcHandle, 'query_children')
@patch.object(ImcHandle, 'login')
def test_vmedia_get_existing_uri(login_mock, query_mock):
    # Patch ImcHandle.login to create a faux ImcHandle w/o a real CIMC
    # Patch ImcHandle.query_children to simulate CIMC interaction
    login_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # Scenario: No pre-existing mappings
    query_mock.return_value = []
    assert vmedia_get_existing_uri(test_cimc) == []
    # Assert query_children was called with the correct in_dn
    assert query_mock.mock_calls[0] == call(in_dn='sys/svc-ext/vmedia-svc')

    # Scenario: Three pre-existing mappings
    vmedia1 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="One.iso")
    vmedia1.remote_share = "http://169.254.1.2/"
    vmedia1.remote_file = "One.iso"
    vmedia2 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Two")
    vmedia2.remote_share = "http://169.254.1.2/"
    vmedia2.remote_file = "Two.iso"
    vmedia3 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Three")
    vmedia3.remote_share = "http://169.254.1.2/"
    vmedia3.remote_file = "Three.iso"
    query_mock.return_value = [vmedia1, vmedia2, vmedia3]
    assert vmedia_get_existing_uri(test_cimc) == \
        ["http://169.254.1.2/One.iso",
         "http://169.254.1.2/Two.iso",
         "http://169.254.1.2/Three.iso"]


@patch.object(ImcHandle, 'query_children')
@patch.object(ImcHandle, 'login')
def test_vmedia_get_existing_status(login_mock, query_mock):
    # Patch ImcHandle.login to create a faux ImcHandle w/o a real CIMC
    # Patch ImcHandle.query_children to simulate CIMC interaction
    login_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # Scenario: No pre-existing mappings
    query_mock.return_value = []
    assert vmedia_get_existing_status(test_cimc) == []
    # Assert query_children was called with the correct in_dn
    assert query_mock.mock_calls[0] == call(in_dn='sys/svc-ext/vmedia-svc')

    # Scenario: Three pre-existing mappings
    # mapping_status is not a read-write property, so mock it.
    vmedia1 = MagicMock()
    vmedia1.mapping_status = "In-Progress"
    vmedia2 = MagicMock()
    vmedia2.mapping_status = "OK"
    vmedia3 = MagicMock()
    vmedia3.mapping_status = "Error"
    query_mock.return_value = [vmedia1, vmedia2, vmedia3]
    assert vmedia_get_existing_status(test_cimc) == ["In-Progress", "OK", "Error"]


@patch('imcsdk.apis.server.vmedia.vmedia_get_existing_status')
@patch('imcsdk.apis.server.vmedia.vmedia_get_existing_uri')
@patch('imcsdk.apis.server.vmedia.vmedia_mount_create')
@patch.object(ImcHandle, 'login')
def test_valid_vmedia_mount_iso_uri(login_mock, add_mount_mock, exist_mock, state_mock):
    # Patch ImcHandle.login to create a faux ImcHandle object w/o a real CIMC
    # Patch vmedia_mount_create to simulate CIMC interaction w/o a real CIMC
    # Patch vmedia_get_existing_uri to simulate existing ISOs
    # Patch vmedia_get_existing_status to simulate ISO status
    login_mock.return_value = True
    add_mount_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # http mapping succeeded
    exist_mock.return_value = ["http://169.254.1.2/test.iso"]
    state_mock.side_effect = [['In Progress'], ['OK']]
    assert vmedia_mount_iso_uri(test_cimc, 'http://169.254.1.2/test.iso', interval=1) is True
    # Assert values of the mount options
    assert add_mount_mock.call_args[1] == {
        'volume_name': 'test.iso',
        'map': 'www',
        'mount_options': 'noauto',
        'remote_share': "http://169.254.1.2/",
        'remote_file': 'test.iso',
        'username': '',
        'password': '',
        'server_id': 1
    }

    # https mapping succeeded
    exist_mock.return_value = ["https://169.254.1.2/test.iso"]
    state_mock.side_effect = [['In Progress'], ['OK']]
    assert vmedia_mount_iso_uri(test_cimc, 'https://169.254.1.2/test.iso', interval=1) is True
    # Assert values of the mount options
    assert add_mount_mock.call_args[1] == {
        'volume_name': 'test.iso',
        'mount_options': 'noauto',
        'map': 'www',
        'remote_share': "https://169.254.1.2/",
        'remote_file': 'test.iso',
        'username': '',
        'password': '',
        'server_id': 1
    }

    # CIFS mapping succeeded
    exist_mock.return_value = ["//169.254.1.2/test.iso"]
    state_mock.side_effect = [['In Progress'], ['OK']]
    assert vmedia_mount_iso_uri(test_cimc, '//169.254.1.2/test.iso', interval=1) is True
    # Assert values of the object passed to add_mo()
    assert add_mount_mock.call_args[1] == {
        'volume_name': 'test.iso',
        'mount_options': 'noauto',
        'map': 'cifs',
        'remote_share': "//169.254.1.2/",
        'remote_file': 'test.iso',
        'username': '',
        'password': '',
        'server_id': 1
    }

    # NFS mapping succeeded
    exist_mock.return_value = ["169.254.1.2:/test.iso"]
    state_mock.side_effect = [['In Progress'], ['OK']]
    assert vmedia_mount_iso_uri(test_cimc, '169.254.1.2:/test.iso', interval=1) is True
    # Assert values of the object passed to add_mo()
    assert add_mount_mock.call_args[1] == {
        'volume_name': 'test.iso',
        'mount_options': 'noauto',
        'map': 'nfs',
        'remote_share': "169.254.1.2:/",
        'remote_file': 'test.iso',
        'username': '',
        'password': '',
        'server_id': 1
    }


@patch('imcsdk.apis.server.vmedia.vmedia_get_existing_status')
@patch('imcsdk.apis.server.vmedia.vmedia_get_existing_uri')
@patch('imcsdk.apis.server.vmedia.vmedia_mount_create')
@patch.object(ImcHandle, 'login')
def test_invalid_vmedia_mount_iso_uri(login_mock, add_mount_mock, exist_mock, state_mock):
    # Patch ImcHandle.login to create a faux ImcHandle object w/o a real CIMC
    # Patch vmedia_mount_create to simulate CIMC interaction w/o a real CIMC
    # Patch vmedia_get_existing_uri to simulate existing ISOs
    # Patch vmedia_get_existing_status to simulate ISO status
    login_mock.return_value = True
    add_mount_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # Scenario: Zero value passed in as check interval
    assert_raises(ValueError, vmedia_mount_iso_uri, test_cimc,
                  'http://1.1.1.1/test.iso', interval=0)

    # Scenario: Invalid protocol
    exist_mock.side_effect = [[], ["britt://1.1.1.1/test.iso"]]
    assert_raises(ValueError, vmedia_mount_iso_uri, test_cimc,
                  'britt://1.1.1.1/test.iso', interval=1)

    # Scenario: Mapping failed
    exist_mock.side_effect = [[], []]
    assert_raises(ImcOperationError, vmedia_mount_iso_uri, test_cimc,
                  'http://169.254.1.2/test.iso', interval=1)

    # Scenario: Timeout on state change
    exist_mock.side_effect = [[], ["http://169.254.1.2/test.iso"]]
    state_mock.side_effect = [['In Progress'], ['In Progress']]
    assert_raises(ImcOperationError, vmedia_mount_iso_uri, test_cimc,
                  'http://169.254.1.2/test.iso', interval=1, timeout=0)

    # Scenario: State returns Error
    exist_mock.side_effect = [[], ["http://169.254.1.2/test.iso"]]
    state_mock.side_effect = [['In Progress'], ['ERROR: [404] File not found. ']]
    assert_raises(ImcOperationError, vmedia_mount_iso_uri, test_cimc,
                  'http://169.254.1.2/test.iso', interval=1)


@patch.object(ImcHandle, 'remove_mo')
@patch.object(ImcHandle, 'query_children')
@patch.object(ImcHandle, 'login')
def test_valid_remove_vmedia_all(login_mock, query_mock, remove_mock):
    # Patch ImcHandle.login to create a faux ImcHandle object w/o a real CIMC
    # Patch ImcHandle.query_children to simulate CIMC interaction w/o a real CIMC
    # Patch ImcHandle.remove_mo to simulate CIMC interaction w/o a real CIMC
    login_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # Scenario: server has no vmedia mounts
    query_mock.return_value = []
    assert vmedia_mount_remove_all(test_cimc) is True
    assert remove_mock.mock_calls == []

    # Scenario: Three pre-existing mounts, removed successfully
    vmedia1 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="One.iso")
    vmedia1.remote_share = "http://169.254.1.2/"
    vmedia1.remote_file = "One.iso"
    vmedia2 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Two")
    vmedia2.remote_share = "http://169.254.1.2/"
    vmedia2.remote_file = "Two.iso"
    vmedia3 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Three")
    vmedia3.remote_share = "http://169.254.1.2/"
    vmedia3.remote_file = "Three.iso"
    # query_mock is called a first time while removing the existing mappings,
    # and a second time to verify no mappings remain
    query_mock.side_effect = [[vmedia1, vmedia2, vmedia3], []]
    assert vmedia_mount_remove_all(test_cimc) is True
    assert remove_mock.mock_calls == [call(vmedia1), call(vmedia2), call(vmedia3)]


@patch.object(ImcHandle, 'remove_mo')
@patch.object(ImcHandle, 'query_children')
@patch.object(ImcHandle, 'login')
def test_invalid_remove_vmedia_all(login_mock, query_mock, remove_mock):
    # Patch ImcHandle.login to create a faux ImcHandle object w/o a real CIMC
    # Patch ImcHandle.query_children to simulate CIMC interaction w/o a real CIMC
    # Patch ImcHandle.remove_mo to simulate CIMC interaction w/o a real CIMC
    login_mock.return_value = True
    test_cimc = ImcHandle(ip='169.254.1.1', username='admin', password='right')
    test_cimc._set_platform(platform=IMC_PLATFORM.TYPE_CLASSIC)

    # Scenario: Three pre-existing mounts, only two removed successfully
    vmedia1 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="One.iso")
    vmedia1.remote_share = "http://169.254.1.2/"
    vmedia1.remote_file = "One.iso"
    vmedia2 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Two")
    vmedia2.remote_share = "http://169.254.1.2/"
    vmedia2.remote_file = "Two.iso"
    vmedia3 = CommVMediaMap(parent_mo_or_dn="sys/svc-ext/vmedia-svc", volume_name="Three")
    vmedia3.remote_share = "http://169.254.1.2/"
    vmedia3.remote_file = "Three.iso"
    query_mock.side_effect = [[vmedia1, vmedia2, vmedia3], [vmedia1]]
    assert_raises(ImcOperationError, vmedia_mount_remove_all, test_cimc)
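# A minimal standalone sketch (assumed, not from imcsdk) of the mock pattern
# the tests above rely on: `side_effect` yields one return value per call, so
# a mocked status check can appear to change between successive polls.
from unittest.mock import MagicMock

poll = MagicMock(side_effect=[['In Progress'], ['OK']])
assert poll() == ['In Progress']  # first poll: mount still mapping
assert poll() == ['OK']           # second poll: mount finished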
41.996732
79
0.650844
1,696
12,851
4.704599
0.117335
0.026319
0.030706
0.029076
0.8189
0.806241
0.787442
0.761624
0.758742
0.746961
0
0.037656
0.231266
12,851
305
80
42.134426
0.770017
0.211735
0
0.714932
0
0
0.213222
0.066309
0
0
0
0
0.113122
1
0.027149
false
0.045249
0.031674
0
0.058824
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
8af57d7abd0ddffcade87399c278e4a4e7a667ba
22,404
py
Python
veritastool/fairness/tests/test_creditscoring.py
mas-veritas2/veritastool
37f36b620c3637e230efd8ed69cbb5e4ef87fe2b
[ "Apache-2.0" ]
2
2022-01-12T07:12:50.000Z
2022-03-08T10:57:10.000Z
veritastool/fairness/tests/test_creditscoring.py
mas-veritas2/veritastool
37f36b620c3637e230efd8ed69cbb5e4ef87fe2b
[ "Apache-2.0" ]
18
2021-11-02T03:03:00.000Z
2021-12-10T07:44:40.000Z
veritastool/fairness/tests/test_creditscoring.py
mas-veritas2/veritastool
37f36b620c3637e230efd8ed69cbb5e4ef87fe2b
[ "Apache-2.0" ]
null
null
null
import pickle
from veritastool.model.model_container import ModelContainer
from veritastool.fairness.credit_scoring import CreditScoring
from veritastool.metrics.performance_metrics import PerformanceMetrics
from veritastool.metrics.fairness_metrics import FairnessMetrics
from veritastool.fairness.fairness import Fairness
# from veritastool.custom.LRwrapper import LRwrapper
import numpy as np
import pandas as pd
import pytest
from veritastool.util.errors import *

# Load Credit Scoring Test Data
# PATH = os.path.abspath(os.path.dirname(__file__))
file = "veritastool/resources/data/credit_score_dict.pickle"
input_file = open(file, "rb")
cs = pickle.load(input_file)

# Reduce into two classes
cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)

# Model Container Parameters
y_true = np.array(cs["y_test"])
y_pred = np.array(cs["y_pred"])
y_train = np.array(cs["y_train"])
p_var = ['SEX', 'MARRIAGE']
p_grp = {'SEX': [1], 'MARRIAGE': [1]}
x_train = cs["X_train"]
x_test = cs["X_test"]
model_object = cs["model"]
# model_object = LogisticRegression(C=0.1)
model_name = "credit scoring"
model_type = "credit"
y_prob = cs["y_prob"]

# rejection inference
num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
base_default_rate = {'SEX': [0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
# model_object = LRwrapper(model_object)

# Create Model Container and Use Case Object
container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
# cre_sco_obj.k = 1


def test_check_input():
    cre_sco_obj._model_type_to_metric_lookup[cre_sco_obj.model_params[0].model_type] = ('classification', 4, 2)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj._check_input()
    assert toolkit_exit.type == MyError

    cre_sco_obj._model_type_to_metric_lookup[cre_sco_obj.model_params[0].model_type] = ('classification', 2, 1)
    cre_sco_obj.model_params[0].y_pred = None
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj._check_input()
    assert toolkit_exit.type == MyError


def test_check_special_params():
    # Load Credit Scoring Test Data
    # PATH = os.path.abspath(os.path.dirname(__file__))
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    # Reduce into two classes
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    # Model Container Parameters
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    # model_object = LogisticRegression(C=0.1)
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    # rejection inference: special params are not dicts at all
    num_applicants = 1
    base_default_rate = 2
    # model_object = LRwrapper(model_object)
    # Create Model Container and Use Case Object
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError
    # cre_sco_obj.spl_params = {'num_applicants': 1, 'base_default_rate': 2}

    # Scenario: non-numeric entry in num_applicants
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': ['3500', 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError
    # cre_sco_obj.spl_params = {'num_applicants': {'SEX': ['3500', '5000'], 'MARRIAGE': [3500, 5000]},
    #                           'base_default_rate': {'SEX': [0.1, 0.05], 'MARRIAGE': [0.1, 0.05]}}

    # Scenario: non-numeric entry in base_default_rate
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': ['0.10', 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: negative value in num_applicants
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [-3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: negative value in base_default_rate
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [-0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: num_applicants lists have the wrong length
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000, 3500], 'MARRIAGE': [3500, 5000, 3500]}
    base_default_rate = {'SEX': [-0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: base_default_rate lists have the wrong length
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [0.1, 0.05, 0.1], 'MARRIAGE': [0.1, 0.05, 0.1]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: base_default_rate values are implausibly small
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [0.001, 0.005], 'MARRIAGE': [0.001, 0.005]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError

    # Scenario: fair_metric_name incompatible with rejection inference
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    num_applicants = {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]}
    base_default_rate = {'SEX': [0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    with pytest.raises(MyError) as toolkit_exit:
        cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fair_metric_name="mi_independence", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)
    assert toolkit_exit.type == MyError


def test_get_confusion_matrix():
    # Load Credit Scoring Test Data
    # PATH = os.path.abspath(os.path.dirname(__file__))
    file = "veritastool/resources/data/credit_score_dict.pickle"
    input_file = open(file, "rb")
    cs = pickle.load(input_file)
    # Reduce into two classes
    cs["X_train"]['MARRIAGE'] = cs["X_train"]['MARRIAGE'].replace([0, 3], 1)
    cs["X_test"]['MARRIAGE'] = cs["X_test"]['MARRIAGE'].replace([0, 3], 1)
    # Model Container Parameters
    y_true = np.array(cs["y_test"])
    y_pred = np.array(cs["y_pred"])
    y_train = np.array(cs["y_train"])
    p_var = ['SEX', 'MARRIAGE']
    p_grp = {'SEX': [1], 'MARRIAGE': [1]}
    x_train = cs["X_train"]
    x_test = cs["X_test"]
    model_object = cs["model"]
    # model_object = LogisticRegression(C=0.1)
    model_name = "credit scoring"
    model_type = "credit"
    y_prob = cs["y_prob"]
    # rejection inference
    num_applicants = {'SEX': [3500.0, 5000.0], 'MARRIAGE': [3500.0, 5000.0]}
    base_default_rate = {'SEX': [0.10, 0.05], 'MARRIAGE': [0.10, 0.05]}
    # model_object = LRwrapper(model_object)
    # Create Model Container and Use Case Object
    container = ModelContainer(y_true=y_true, y_train=y_train, p_var=p_var, p_grp=p_grp, x_train=x_train, x_test=x_test, model_object=model_object, model_type=model_type, model_name=model_name, y_pred=y_pred, y_prob=y_prob)
    cre_sco_obj = CreditScoring(model_params=[container], fair_threshold=0.43, fair_concern="eligible", fair_priority="benefit", fair_impact="significant", fairness_metric_value_input={'SEX': {'fpr_parity': 0.2}}, num_applicants=num_applicants, base_default_rate=base_default_rate)

    result = cre_sco_obj._get_confusion_matrix(None, None, None)
    assert len(result) == 4
    assert result[0] == None

    result = cre_sco_obj._get_confusion_matrix(None, None, 0.25, curr_p_var='SEX')
    assert len(result) == 8
    assert result[0] == None

    cre_sco_obj.spl_params = {'num_applicants': {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]},
                              'base_default_rate': {'SEX': [0.1, 0.05], 'MARRIAGE': [0.1, 0.05]}}
    result = cre_sco_obj._get_confusion_matrix(y_true, y_pred, None)
    assert len(result) == 4
    assert result == (507, 61, 539.0, 7393.0)

    cre_sco_obj._rejection_inference_flag = {'SEX': False, 'MARRIAGE': False}
    cre_sco_obj.spl_params = {'num_applicants': {'SEX': [3500, 5000], 'MARRIAGE': [3500, 5000]},
                              'base_default_rate': {'SEX': [0.1, 0.05], 'MARRIAGE': [0.1, 0.05]}}
    result = cre_sco_obj._get_confusion_matrix(y_true=y_true, y_pred=y_pred, sample_weight=np.array([0.7 for x in range(len(y_pred))]), curr_p_var='SEX', feature_mask=cre_sco_obj.feature_mask)
    assert len(result) == 8
    assert result == (113.4000000000003, 18.199999999999992, 30.79999999999998, 45.500000000000036, 241.49999999999852, 24.499999999999986, 26.599999999999984, 24.499999999999986)
47.067227
245
0.650911
3,187
22,404
4.277063
0.048949
0.014526
0.040716
0.02421
0.933827
0.932874
0.926564
0.918348
0.918348
0.912332
0
0.039913
0.203758
22,404
475
246
47.166316
0.7242
0.145644
0
0.873817
0
0
0.156403
0.029453
0
0
0
0
0.059937
1
0.009464
false
0
0.031546
0
0.041009
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
c1da020d49ce08ffb439dc0a160daf851022938e
5,337
py
Python
test/test_config_clientagent.py
Hawkheart/Astron
3a15606ab15b63b666fdff1e0145417470232dbc
[ "BSD-3-Clause" ]
null
null
null
test/test_config_clientagent.py
Hawkheart/Astron
3a15606ab15b63b666fdff1e0145417470232dbc
[ "BSD-3-Clause" ]
null
null
null
test/test_config_clientagent.py
Hawkheart/Astron
3a15606ab15b63b666fdff1e0145417470232dbc
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python2
import unittest
from common.unittests import ConfigTest
from common.dcfile import *


class TestConfigClientAgent(ConfigTest):
    def test_clientagent_good(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            general:
                dc_files:
                    - %r
            uberdogs:
                - id: 1234
                  class: UberDog1
                  anonymous: true
                - id: 1235
                  class: UberDog2
                  anonymous: false
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 3999
                  client:
                      relocate: true
                      add_interest: enabled
                - type: clientagent
                  bind: 127.0.0.1:57135
                  version: "Sword Art Online v5.1"
                  client:
                      type: libastron
                      add_interest: disabled
                  channels:
                      min: 110600
                      max: 110699
                - type: clientagent
                  bind: 127.0.0.1:57144
                  version: "Sword Art Online v5.1"
                  client:
                      type: libastron
                      add_interest: visible
                  channels:
                      min: 220600
                      max: 220699
            """ % test_dc
        self.assertEquals(self.checkConfig(config), 'Valid')

    def test_ca_invalid_attr(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 3999
                  weeuuweeeuu: sirens!
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

    def test_ca_invalid_channels(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 0
                      max: 3999
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 0
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

    def test_ca_reserved_channels(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 100
                      max: 3999
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 999
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

    def test_ca_client_type_typo(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: 127.0.0.1:57128
                  version: "Sword Art Online v5.1"
                  client:
                      type: astron
                  channels:
                      min: 3100
                      max: 3999
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

    def test_ca_bind_address(self):
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: pizza:2314
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 3999
            """
        self.assertEquals(self.checkConfig(config), 'Invalid')

        # ipv6 test disabled because the client agent can't accept
        # ipv6 addresses yet, and doing so causes a crash
        config = """\
            messagedirector:
                bind: 127.0.0.1:57123
            roles:
                - type: clientagent
                  bind: ::1:2314
                  version: "Sword Art Online v5.1"
                  channels:
                      min: 3100
                      max: 3999
            """
        #self.assertEquals(self.checkConfig(config), 'Valid')


if __name__ == '__main__':
    unittest.main()
29.486188
91
0.417276
454
5,337
4.837004
0.211454
0.057377
0.065574
0.07377
0.755464
0.755464
0.734062
0.711293
0.70674
0.612022
0
0.123638
0.501405
5,337
180
92
29.65
0.701616
0.029043
0
0.72549
0
0
0.788183
0
0
0
0
0
0.052288
1
0.039216
false
0
0.019608
0
0.065359
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
a9bcc77df02d87548c72dd5b8ca7058f19553dbf
127
py
Python
mymodule.py
lzcdev/EasyPythonDemo
6e8af845d08a47e378c5c7d170dfa45fd7e362e3
[ "MIT" ]
null
null
null
mymodule.py
lzcdev/EasyPythonDemo
6e8af845d08a47e378c5c7d170dfa45fd7e362e3
[ "MIT" ]
null
null
null
mymodule.py
lzcdev/EasyPythonDemo
6e8af845d08a47e378c5c7d170dfa45fd7e362e3
[ "MIT" ]
null
null
null
#!/usr/bin/python
from mymudule import sayhi, version

# Alternative:
# from mymudule import *

sayhi()
print 'Version', version
15.875
35
0.748031
16
127
5.9375
0.625
0.252632
0.378947
0.484211
0
0
0
0
0
0
0
0
0.141732
127
8
36
15.875
0.87156
0.401575
0
0
0
0
0.094595
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.333333
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
7
a9e28bbf9ab70446b598d80ff5d1ee7c74014aac
106
py
Python
custom_cropping.py
namantuli18/Amazon-Web-Services-Hackathon
e4a8ab53aaa55068dda06e73ef1f9e2c30b8969d
[ "MIT" ]
2
2020-09-20T16:58:17.000Z
2020-09-30T15:01:53.000Z
custom_cropping.py
namantuli18/Amazon-Web-Services-Hackathon
e4a8ab53aaa55068dda06e73ef1f9e2c30b8969d
[ "MIT" ]
null
null
null
custom_cropping.py
namantuli18/Amazon-Web-Services-Hackathon
e4a8ab53aaa55068dda06e73ef1f9e2c30b8969d
[ "MIT" ]
1
2020-09-15T12:42:51.000Z
2020-09-15T12:42:51.000Z
import torch
import cv2
import tqdm
import numpy as np
import glob
import pandas as pd
13.25
19
0.811321
20
106
4.3
0.5
0.255814
0.302326
0.348837
0.488372
0
0
0
0
0
0
0.011628
0.188679
106
7
20
15.142857
0.988372
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
99b39528107b9fad8953ee50fdd64cc1e3b82241
1,640
py
Python
Simulations/generate_data.py
aagarwal1996/additive_trees
3c24d35b1023c5b569f4e7cdb3821916e406f321
[ "MIT" ]
null
null
null
Simulations/generate_data.py
aagarwal1996/additive_trees
3c24d35b1023c5b569f4e7cdb3821916e406f321
[ "MIT" ]
null
null
null
Simulations/generate_data.py
aagarwal1996/additive_trees
3c24d35b1023c5b569f4e7cdb3821916e406f321
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
import sys

'''
This script is used for the generation of simulation data. Specifically, it
samples design matrices X and creates responses from sparse linear and
sum-of-squares models.
'''


def sample_uniform_X(n, d):
    X = np.random.uniform(0, 1.0, (n, d))
    return X


def sample_boolean_X(n, d):
    X = np.random.randint(0, 2.0, (n, d))
    return X


def linear_model(X, s, beta, sigma):
    '''
    This method is used to create responses from a linear model with hard
    sparsity.

    Parameters:
        X: X matrix
        s: sparsity
        beta: coefficient vector. If beta is not a vector, it is assumed to
              be a scalar coefficient shared by the s active features.
        sigma: s.d. of added noise

    Returns:
        numpy array of shape (n)
    '''
    def create_y(x, s, beta):
        linear_term = 0
        for i in range(s):
            linear_term += x[i] * beta
        return linear_term

    y_train = np.array([create_y(X[i, :], s, beta) for i in range(len(X))])
    y_train = y_train + sigma * np.random.randn(len(X))
    return y_train


def sum_of_squares(X, s, beta, sigma):
    '''
    This method is used to create responses from a sum of squares model with
    hard sparsity.

    Parameters:
        X: X matrix
        s: sparsity
        beta: coefficient vector. If beta is not a vector, it is assumed to
              be a scalar coefficient shared by the s active features.
        sigma: s.d. of added noise

    Returns:
        numpy array of shape (n)
    '''
    def create_y(x, s, beta):
        linear_term = 0
        for i in range(s):
            linear_term += x[i] * x[i] * beta
        return linear_term

    y_train = np.array([create_y(X[i, :], s, beta) for i in range(len(X))])
    y_train = y_train + sigma * np.random.randn(len(X))
    return y_train
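# A short usage sketch, not in the original script: draw n = 100 uniform
# points in d = 5 dimensions and generate noisy responses from the sparse
# linear model with s = 3 active features, scalar beta = 2.0, sigma = 0.1.
if __name__ == "__main__":
    np.random.seed(0)  # seed value is arbitrary; set only for reproducibility
    X = sample_uniform_X(100, 5)
    y = linear_model(X, s=3, beta=2.0, sigma=0.1)
    print(X.shape, y.shape)  # -> (100, 5) (100,)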
25.625
89
0.617683
269
1,640
3.672862
0.275093
0.048583
0.024292
0.044534
0.763158
0.763158
0.712551
0.712551
0.712551
0.712551
0
0.006728
0.275
1,640
64
90
25.625
0.824222
0.307927
0
0.551724
0
0
0
0
0
0
0
0
0
1
0.206897
false
0
0.172414
0
0.586207
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
7
99e7925587178ab11923854249de48ddf52a6141
23,892
py
Python
sdk/python/pulumi_gcp/compute/firewall_policy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
121
2018-06-18T19:16:42.000Z
2022-03-31T06:06:48.000Z
sdk/python/pulumi_gcp/compute/firewall_policy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
492
2018-06-22T19:41:03.000Z
2022-03-31T15:33:53.000Z
sdk/python/pulumi_gcp/compute/firewall_policy.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
43
2018-06-19T01:43:13.000Z
2022-03-23T22:43:37.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['FirewallPolicyArgs', 'FirewallPolicy'] @pulumi.input_type class FirewallPolicyArgs: def __init__(__self__, *, parent: pulumi.Input[str], short_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a FirewallPolicy resource. :param pulumi.Input[str] parent: The parent of the firewall policy. :param pulumi.Input[str] short_name: User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. """ pulumi.set(__self__, "parent", parent) pulumi.set(__self__, "short_name", short_name) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def parent(self) -> pulumi.Input[str]: """ The parent of the firewall policy. """ return pulumi.get(self, "parent") @parent.setter def parent(self, value: pulumi.Input[str]): pulumi.set(self, "parent", value) @property @pulumi.getter(name="shortName") def short_name(self) -> pulumi.Input[str]: """ User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "short_name") @short_name.setter def short_name(self, value: pulumi.Input[str]): pulumi.set(self, "short_name", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class _FirewallPolicyState: def __init__(__self__, *, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, fingerprint: Optional[pulumi.Input[str]] = None, firewall_policy_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, rule_tuple_count: Optional[pulumi.Input[int]] = None, self_link: Optional[pulumi.Input[str]] = None, self_link_with_id: Optional[pulumi.Input[str]] = None, short_name: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering FirewallPolicy resources. :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format. 
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] fingerprint: Fingerprint of the resource. This field is used internally during updates of this resource. :param pulumi.Input[str] firewall_policy_id: The unique identifier for the resource. This identifier is defined by the server. :param pulumi.Input[str] name: Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy. :param pulumi.Input[str] parent: The parent of the firewall policy. :param pulumi.Input[int] rule_tuple_count: Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. :param pulumi.Input[str] self_link: Server-defined URL for the resource. :param pulumi.Input[str] self_link_with_id: Server-defined URL for this resource with the resource id. :param pulumi.Input[str] short_name: User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ if creation_timestamp is not None: pulumi.set(__self__, "creation_timestamp", creation_timestamp) if description is not None: pulumi.set(__self__, "description", description) if fingerprint is not None: pulumi.set(__self__, "fingerprint", fingerprint) if firewall_policy_id is not None: pulumi.set(__self__, "firewall_policy_id", firewall_policy_id) if name is not None: pulumi.set(__self__, "name", name) if parent is not None: pulumi.set(__self__, "parent", parent) if rule_tuple_count is not None: pulumi.set(__self__, "rule_tuple_count", rule_tuple_count) if self_link is not None: pulumi.set(__self__, "self_link", self_link) if self_link_with_id is not None: pulumi.set(__self__, "self_link_with_id", self_link_with_id) if short_name is not None: pulumi.set(__self__, "short_name", short_name) @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> Optional[pulumi.Input[str]]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @creation_timestamp.setter def creation_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "creation_timestamp", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def fingerprint(self) -> Optional[pulumi.Input[str]]: """ Fingerprint of the resource. This field is used internally during updates of this resource. """ return pulumi.get(self, "fingerprint") @fingerprint.setter def fingerprint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "fingerprint", value) @property @pulumi.getter(name="firewallPolicyId") def firewall_policy_id(self) -> Optional[pulumi.Input[str]]: """ The unique identifier for the resource. This identifier is defined by the server. 
""" return pulumi.get(self, "firewall_policy_id") @firewall_policy_id.setter def firewall_policy_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "firewall_policy_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def parent(self) -> Optional[pulumi.Input[str]]: """ The parent of the firewall policy. """ return pulumi.get(self, "parent") @parent.setter def parent(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "parent", value) @property @pulumi.getter(name="ruleTupleCount") def rule_tuple_count(self) -> Optional[pulumi.Input[int]]: """ Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. """ return pulumi.get(self, "rule_tuple_count") @rule_tuple_count.setter def rule_tuple_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_tuple_count", value) @property @pulumi.getter(name="selfLink") def self_link(self) -> Optional[pulumi.Input[str]]: """ Server-defined URL for the resource. """ return pulumi.get(self, "self_link") @self_link.setter def self_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link", value) @property @pulumi.getter(name="selfLinkWithId") def self_link_with_id(self) -> Optional[pulumi.Input[str]]: """ Server-defined URL for this resource with the resource id. """ return pulumi.get(self, "self_link_with_id") @self_link_with_id.setter def self_link_with_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link_with_id", value) @property @pulumi.getter(name="shortName") def short_name(self) -> Optional[pulumi.Input[str]]: """ User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "short_name") @short_name.setter def short_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "short_name", value) class FirewallPolicy(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, short_name: Optional[pulumi.Input[str]] = None, __props__=None): """ Hierarchical firewall policy rules let you create and enforce a consistent firewall policy across your organization. Rules can explicitly allow or deny connections or delegate evaluation to lower level policies. Policies can be created within organizations or folders. 
This resource should be generally be used with `compute.FirewallPolicyAssociation` and `compute.FirewallPolicyRule` For more information see the [official documentation](https://cloud.google.com/vpc/docs/firewall-policies) ## Example Usage ```python import pulumi import pulumi_gcp as gcp default = gcp.compute.FirewallPolicy("default", description="Example Resource", parent="organizations/12345", short_name="my-policy") ``` ## Import FirewallPolicy can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/firewallPolicy:FirewallPolicy default locations/global/firewallPolicies/{{name}} ``` ```sh $ pulumi import gcp:compute/firewallPolicy:FirewallPolicy default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] parent: The parent of the firewall policy. :param pulumi.Input[str] short_name: User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ ... @overload def __init__(__self__, resource_name: str, args: FirewallPolicyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Hierarchical firewall policy rules let you create and enforce a consistent firewall policy across your organization. Rules can explicitly allow or deny connections or delegate evaluation to lower level policies. Policies can be created within organizations or folders. This resource should be generally be used with `compute.FirewallPolicyAssociation` and `compute.FirewallPolicyRule` For more information see the [official documentation](https://cloud.google.com/vpc/docs/firewall-policies) ## Example Usage ```python import pulumi import pulumi_gcp as gcp default = gcp.compute.FirewallPolicy("default", description="Example Resource", parent="organizations/12345", short_name="my-policy") ``` ## Import FirewallPolicy can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/firewallPolicy:FirewallPolicy default locations/global/firewallPolicies/{{name}} ``` ```sh $ pulumi import gcp:compute/firewallPolicy:FirewallPolicy default {{name}} ``` :param str resource_name: The name of the resource. :param FirewallPolicyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs):
    resource_args, opts = _utilities.get_resource_args_opts(FirewallPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        __self__._internal_init(resource_name, *args, **kwargs)

def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   parent: Optional[pulumi.Input[str]] = None,
                   short_name: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = FirewallPolicyArgs.__new__(FirewallPolicyArgs)

        __props__.__dict__["description"] = description
        if parent is None and not opts.urn:
            raise TypeError("Missing required property 'parent'")
        __props__.__dict__["parent"] = parent
        if short_name is None and not opts.urn:
            raise TypeError("Missing required property 'short_name'")
        __props__.__dict__["short_name"] = short_name
        __props__.__dict__["creation_timestamp"] = None
        __props__.__dict__["fingerprint"] = None
        __props__.__dict__["firewall_policy_id"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["rule_tuple_count"] = None
        __props__.__dict__["self_link"] = None
        __props__.__dict__["self_link_with_id"] = None
    super(FirewallPolicy, __self__).__init__(
        'gcp:compute/firewallPolicy:FirewallPolicy',
        resource_name,
        __props__,
        opts)

@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        creation_timestamp: Optional[pulumi.Input[str]] = None,
        description: Optional[pulumi.Input[str]] = None,
        fingerprint: Optional[pulumi.Input[str]] = None,
        firewall_policy_id: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        parent: Optional[pulumi.Input[str]] = None,
        rule_tuple_count: Optional[pulumi.Input[int]] = None,
        self_link: Optional[pulumi.Input[str]] = None,
        self_link_with_id: Optional[pulumi.Input[str]] = None,
        short_name: Optional[pulumi.Input[str]] = None) -> 'FirewallPolicy':
    """
    Get an existing FirewallPolicy resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
    :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you
           create the resource.
    :param pulumi.Input[str] fingerprint: Fingerprint of the resource. This field is used internally during updates
           of this resource.
    :param pulumi.Input[str] firewall_policy_id: The unique identifier for the resource. This identifier is defined
           by the server.
    :param pulumi.Input[str] name: Name of the resource. It is a numeric ID allocated by GCP which uniquely
           identifies the Firewall Policy.
    :param pulumi.Input[str] parent: The parent of the firewall policy.
    :param pulumi.Input[int] rule_tuple_count: Total count of all firewall policy rule tuples. A firewall policy
           can not exceed a set number of tuples.
    :param pulumi.Input[str] self_link: Server-defined URL for the resource.
    :param pulumi.Input[str] self_link_with_id: Server-defined URL for this resource with the resource id.
    :param pulumi.Input[str] short_name: User-provided name of the Organization firewall policy. The name should be
           unique in the organization in which the firewall policy is created. The name must be 1-63 characters
           long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular
           expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and
           all following characters must be a dash, lowercase letter, or digit, except the last character, which
           cannot be a dash.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = _FirewallPolicyState.__new__(_FirewallPolicyState)

    __props__.__dict__["creation_timestamp"] = creation_timestamp
    __props__.__dict__["description"] = description
    __props__.__dict__["fingerprint"] = fingerprint
    __props__.__dict__["firewall_policy_id"] = firewall_policy_id
    __props__.__dict__["name"] = name
    __props__.__dict__["parent"] = parent
    __props__.__dict__["rule_tuple_count"] = rule_tuple_count
    __props__.__dict__["self_link"] = self_link
    __props__.__dict__["self_link_with_id"] = self_link_with_id
    return FirewallPolicy(resource_name, opts=opts, __props__=__props__)

@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
    """
    Creation timestamp in RFC3339 text format.
    """
    return pulumi.get(self, "creation_timestamp")

@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
    """
    An optional description of this resource. Provide this property when you create the resource.
    """
    return pulumi.get(self, "description")

@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
    """
    Fingerprint of the resource. This field is used internally during updates of this resource.
    """
    return pulumi.get(self, "fingerprint")

@property
@pulumi.getter(name="firewallPolicyId")
def firewall_policy_id(self) -> pulumi.Output[str]:
    """
    The unique identifier for the resource. This identifier is defined by the server.
    """
    return pulumi.get(self, "firewall_policy_id")

@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.
    """
    return pulumi.get(self, "name")

@property
@pulumi.getter
def parent(self) -> pulumi.Output[str]:
    """
    The parent of the firewall policy.
    """
    return pulumi.get(self, "parent")

@property
@pulumi.getter(name="ruleTupleCount")
def rule_tuple_count(self) -> pulumi.Output[int]:
    """
    Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples.
    """
    return pulumi.get(self, "rule_tuple_count")

@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
    """
    Server-defined URL for the resource.
    """
    return pulumi.get(self, "self_link")

@property
@pulumi.getter(name="selfLinkWithId")
def self_link_with_id(self) -> pulumi.Output[str]:
    """
    Server-defined URL for this resource with the resource id.
    """
    return pulumi.get(self, "self_link_with_id")

@property
@pulumi.getter(name="shortName")
def short_name(self) -> pulumi.Output[str]:
    """
    User-provided name of the Organization firewall policy. The name should be unique in the organization in
    which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035.
    Specifically, the name must be 1-63 characters long and match the regular expression
    `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following
    characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    """
    return pulumi.get(self, "short_name")
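A minimal usage sketch for the resource class above (not part of the original file), assuming the pulumi and pulumi_gcp packages; the resource name and parent organization ID are hypothetical:

    import pulumi
    import pulumi_gcp as gcp

    # parent and short_name are the two required inputs, per the checks
    # in _internal_init above; the organization ID here is a placeholder.
    policy = gcp.compute.FirewallPolicy(
        "example-policy",
        parent="organizations/123456789",
        short_name="example-policy",
        description="Org-level firewall policy managed by Pulumi")

    # Server-assigned outputs such as name and self_link resolve after creation.
    pulumi.export("policy_name", policy.name)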
48.169355
525
0.672736
2,964
23,892
5.238529
0.082659
0.060218
0.069427
0.06376
0.851742
0.817608
0.776583
0.752238
0.742513
0.713274
0
0.005335
0.239034
23,892
495
526
48.266667
0.848688
0.432906
0
0.532075
1
0
0.104084
0.003329
0
0
0
0
0
1
0.162264
false
0.003774
0.018868
0
0.283019
0.049057
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
4131e83429eb30f1f02e6c81cbff70a8677a23ea
7,427
py
Python
authors/apps/notifications/tests/test_notification.py
andela/ah-magnificent6
daf55ce4819f57cec8510c5726e86a0b1e78e3e1
[ "BSD-3-Clause" ]
null
null
null
authors/apps/notifications/tests/test_notification.py
andela/ah-magnificent6
daf55ce4819f57cec8510c5726e86a0b1e78e3e1
[ "BSD-3-Clause" ]
75
2018-08-28T08:37:04.000Z
2022-03-11T23:29:08.000Z
authors/apps/notifications/tests/test_notification.py
andela/ah-magnificent6
daf55ce4819f57cec8510c5726e86a0b1e78e3e1
[ "BSD-3-Clause" ]
2
2018-08-27T08:13:55.000Z
2018-12-18T09:17:26.000Z
from .base_setup import Base
from rest_framework import status
from django.urls import reverse
from django.core import mail
from authors.apps.authentication.models import User
from authors.apps.profiles.models import Profile
from authors.apps.core.cron import EmailNotificationCron


class ArticleDeleteUpdateTests(Base):
    """Test suite for notifications."""

    def setUp(self):
        """Setup data for the tests."""
        super().setUp()
        self.res = self.client.post(
            self.article_url,
            self.article_data,
            format="json",
            **self.headers_one)

    def tearDown(self):
        """Teardown for the tests."""
        super().tearDown()

    def test_successful_notification(self):
        """Tests that a user successfully receives notifications."""
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        self.assertEqual(notification.status_code, status.HTTP_200_OK)

    def test_successfully_get_a_notification(self):
        """Tests that a user can get a notification."""
        EmailNotificationCron().do()
        self.assertEqual(len(mail.outbox), 3)
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        pk = [*notification.data][0]
        response = self.client.get(
            reverse('notifications:notification', kwargs={'pk': pk}),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_successfully_delete_notification(self):
        """Tests that a user can delete a notification."""
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        pk = [*notification.data][0]
        delete = self.client.delete(
            reverse('notifications:notification', kwargs={'pk': pk}),
            **self.headers_two)
        self.assertEqual(delete.status_code, status.HTTP_200_OK)

    def test_unsuccessfully_delete_notification(self):
        """Tests that a user cannot delete a notification they do not own."""
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        pk = [*notification.data][0]
        delete = self.client.delete(
            reverse('notifications:notification', kwargs={'pk': pk}),
            **self.headers_one)
        self.assertEqual(delete.status_code, status.HTTP_403_FORBIDDEN)

    def test_unsuccessfully_mark_read_notification(self):
        """Tests that a user cannot mark as read a notification they do not own."""
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        pk = [*notification.data][0]
        response = self.client.put(
            reverse('notifications:notification', kwargs={'pk': pk}),
            **self.headers_one)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_successfully_mark_read_notification(self):
        """Tests that a user successfully marks a notification as read."""
        notification = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        pk = [*notification.data][0]
        response = self.client.put(
            reverse('notifications:notification', kwargs={'pk': pk}),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_successfully_mark_all_notification_as_read(self):
        """Tests that a user successfully marks all notifications as read."""
        notification = self.client.put(
            reverse('notifications:my_notifications'), **self.headers_two)
        self.assertEqual(notification.status_code, status.HTTP_200_OK)
        response = self.client.get(
            reverse('notifications:my_notifications'), **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_unsuccessfully_mark_non_existing_notification(self):
        """Tests that a user unsuccessfully marks as read a non-existing notification."""
        response = self.client.put(
            reverse('notifications:notification', kwargs={'pk': 500}),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_unsuccessfully_delete_non_existing_notification(self):
        """Tests that a user unsuccessfully deletes a non-existing notification."""
        response = self.client.delete(
            reverse('notifications:notification', kwargs={'pk': 500}),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_unsuccessfully_get_non_existing_notification(self):
        """Tests that a user unsuccessfully gets a non-existing notification."""
        response = self.client.get(
            reverse('notifications:notification', kwargs={'pk': 500}),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_successfully_activate_app_notification(self):
        """Tests that a user successfully activates app notifications."""
        response = self.client.post(
            reverse('notifications:switch_app_notifications'),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_successfully_deactivate_app_notification(self):
        """Tests that a user successfully deactivates app notifications."""
        self.client.post(
            reverse('notifications:switch_app_notifications'),
            **self.headers_one)
        response = self.client.post(
            reverse('notifications:switch_app_notifications'),
            **self.headers_one)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_successfully_activate_email_notification(self):
        """Tests that a user successfully activates email notifications."""
        response = self.client.post(
            reverse('notifications:switch_email_notifications'),
            **self.headers_two)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_successfully_deactivate_email_notification(self):
        """Tests that a user successfully deactivates email notifications."""
        self.client.post(
            reverse('notifications:switch_email_notifications'),
            **self.headers_one)
        response = self.client.post(
            reverse('notifications:switch_email_notifications'),
            **self.headers_one)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
39.505319
80
0.650195
791
7,427
5.900126
0.12263
0.053568
0.050996
0.068567
0.849582
0.841654
0.821727
0.777159
0.751446
0.714806
0
0.011435
0.246398
7,427
187
81
39.716578
0.822405
0.129797
0
0.714286
0
0
0.124549
0.120944
0
0
0
0
0.142857
1
0.142857
false
0
0.058824
0
0.210084
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
4170f433bb97caf67171fac3293b0a6b1c2d0603
66
py
Python
tests/test_Monthly_Savings_calculator.py
valli180/Monthly_Savings_Calculator
ce4e0c9ec49ba249ab73b8b177f1146be6ed6715
[ "MIT" ]
null
null
null
tests/test_Monthly_Savings_calculator.py
valli180/Monthly_Savings_Calculator
ce4e0c9ec49ba249ab73b8b177f1146be6ed6715
[ "MIT" ]
null
null
null
tests/test_Monthly_Savings_calculator.py
valli180/Monthly_Savings_Calculator
ce4e0c9ec49ba249ab73b8b177f1146be6ed6715
[ "MIT" ]
null
null
null
from Monthly_Savings_calculator import Monthly_Savings_calculator
33
65
0.939394
8
66
7.25
0.625
0.482759
0.827586
0
0
0
0
0
0
0
0
0
0.060606
66
1
66
66
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
4185ff9e4540cc266e2717d07955343fcb5f2fd7
28,281
py
Python
scale/scheduler/test/node/test_node_class.py
stevevarner/scale
9623b261db4ddcf770f00df16afc91176142bb7c
[ "Apache-2.0" ]
null
null
null
scale/scheduler/test/node/test_node_class.py
stevevarner/scale
9623b261db4ddcf770f00df16afc91176142bb7c
[ "Apache-2.0" ]
null
null
null
scale/scheduler/test/node/test_node_class.py
stevevarner/scale
9623b261db4ddcf770f00df16afc91176142bb7c
[ "Apache-2.0" ]
null
null
null
from __future__ import unicode_literals

import datetime

import django
from django.test import TestCase
from django.utils.timezone import now
from mock import patch

from job.execution.job_exe import RunningJobExecution
from job.execution.tasks.cleanup_task import CLEANUP_TASK_ID_PREFIX
from job.tasks.health_task import HEALTH_TASK_ID_PREFIX, HealthTask
from job.tasks.manager import TaskManager
from job.tasks.pull_task import PULL_TASK_ID_PREFIX
from job.tasks.update import TaskStatusUpdate
from job.test import utils as job_test_utils
from node.test import utils as node_test_utils
from scheduler.cleanup.node import JOB_EXES_WARNING_THRESHOLD
from scheduler.models import Scheduler
from scheduler.node.conditions import NodeConditions
from scheduler.node.node_class import Node
from util.parse import datetime_to_string


class TestNode(TestCase):

    def setUp(self):
        django.setup()

        self.scheduler = Scheduler()
        self.node_agent = 'agent_1'
        self.node = node_test_utils.create_node(hostname='host_1', slave_id=self.node_agent)
        self.job_exe = job_test_utils.create_running_job_exe(agent_id=self.node_agent, node=self.node)
        self.task_mgr = TaskManager()

    @patch('scheduler.node.conditions.now')
    def test_generate_status_json(self, mock_now):
        """Tests calling generate_status_json() successfully"""
        right_now = now()
        mock_now.return_value = right_now
        num_job_exes = JOB_EXES_WARNING_THRESHOLD + 1
        node = Node(self.node_agent, self.node, self.scheduler)
        node._conditions.handle_pull_task_failed()
        node._conditions.update_cleanup_count(num_job_exes)
        node._update_state()
        nodes_list = []
        node.generate_status_json(nodes_list)
        expected_results = [{'id': node.id, 'hostname': node.hostname, 'agent_id': self.node_agent,
                             'is_active': True,
                             'state': {'name': 'DEGRADED', 'title': Node.DEGRADED.title,
                                       'description': Node.DEGRADED.description},
                             'errors': [{'name': 'IMAGE_PULL', 'title': NodeConditions.IMAGE_PULL_ERR.title,
                                         'description': NodeConditions.IMAGE_PULL_ERR.description,
                                         'started': datetime_to_string(right_now),
                                         'last_updated': datetime_to_string(right_now)}],
                             'warnings': [{'name': 'CLEANUP', 'title': NodeConditions.CLEANUP_WARNING.title,
                                           'description': NodeConditions.CLEANUP_WARNING.description % num_job_exes,
                                           'started': datetime_to_string(right_now),
                                           'last_updated': datetime_to_string(right_now)}]}]
        self.assertListEqual(nodes_list, expected_results)

    def test_handle_failed_cleanup_task(self):
        """Tests handling failed cleanup task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when

        # Get initial cleanup task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        task_1_id = task.id

        # Fail task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # No new cleanup task right away
        tasks = node.get_next_tasks(when + datetime.timedelta(seconds=5))
        self.assertListEqual([], tasks)
        self.assertFalse(node._is_initial_cleanup_completed)

        # After error threshold, we should get new cleanup task
        new_time = when + Node.CLEANUP_ERR_THRESHOLD + datetime.timedelta(seconds=5)
        node._last_heath_task = new_time  # Get rid of health check task
        task = node.get_next_tasks(new_time)[0]
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))

    def test_handle_initial_cleanup_task(self):
        """Tests handling the initial cleanup task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when

        # Get initial cleanup task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertTrue(task.is_initial_cleanup)
        self.assertEqual(task.agent_id, self.node_agent)

        # Schedule initial cleanup and make sure no new task is ready
        self.task_mgr.launch_tasks([task], now())
        self.assertListEqual([], node.get_next_tasks(when))
        self.assertFalse(node._is_initial_cleanup_completed)

        # Complete initial clean up, verify no new cleanup task
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FINISHED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        for task in node.get_next_tasks(when):
            self.assertFalse(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertTrue(node._is_initial_cleanup_completed)

    def test_handle_killed_cleanup_task(self):
        """Tests handling killed cleanup task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)

        # Get initial cleanup task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        task_1_id = task.id

        # Kill task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.KILLED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_initial_cleanup_completed)

    def test_handle_lost_cleanup_tasks(self):
        """Tests handling lost cleanup tasks"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)

        # Get initial cleanup task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        task_1_id = task.id

        # Lose task without scheduling and get different task next time
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_initial_cleanup_completed)

        # Lose task with scheduling and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_initial_cleanup_completed)

        # Lose task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_initial_cleanup_completed)

    def test_handle_regular_cleanup_task(self):
        """Tests handling a regular cleanup task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # No task since there are no job executions to clean
        self.assertListEqual([], node.get_next_tasks(when))

        # Add job execution and complete task to clean it up
        node.add_job_execution(self.job_exe)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(CLEANUP_TASK_ID_PREFIX))
        self.assertFalse(task.is_initial_cleanup)
        self.assertListEqual(task.job_exes, [self.job_exe])
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FINISHED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # No task since all job executions have been cleaned
        self.assertListEqual([], node.get_next_tasks(when))

    def test_paused_node_cleanup_task(self):
        """Tests not returning cleanup task when its node is paused"""
        when = now()
        paused_node = node_test_utils.create_node(hostname='host_1_paused', slave_id='agent_paused')
        paused_node.is_paused = True
        node = Node('agent_paused', paused_node, self.scheduler)
        # Turn off health task
        node._last_heath_task = when

        # No task due to paused node
        self.assertListEqual([], node.get_next_tasks(when))

    def test_handle_failed_health_task(self):
        """Tests handling failed health task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        task_1_id = task.id
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))

        # Fail task after running
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # Check node state
        self.assertEqual(node._state, Node.DEGRADED)
        self.assertTrue(NodeConditions.HEALTH_FAIL_ERR.name in node._conditions._active_errors)

        # No new health task right away
        tasks = node.get_next_tasks(when + datetime.timedelta(seconds=5))
        self.assertListEqual([], tasks)
        self.assertFalse(node._conditions.is_health_check_normal)

        # After error threshold, we should get new health task
        new_time = when + Node.HEALTH_ERR_THRESHOLD + datetime.timedelta(seconds=5)
        task = node.get_next_tasks(new_time)[0]
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))

    def test_handle_failed_health_task_bad_daemon(self):
        """Tests handling a failed health task where the Docker daemon is bad"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))

        # Fail task with bad daemon exit code
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now(),
                                                          exit_code=HealthTask.BAD_DAEMON_CODE)
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # Check node state
        self.assertEqual(node._state, Node.DEGRADED)
        self.assertTrue(NodeConditions.BAD_DAEMON_ERR.name in node._conditions._active_errors)

    def test_handle_failed_health_task_bad_logstash(self):
        """Tests handling a failed health task where logstash is unreachable"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))

        # Fail task with bad logstash exit code
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now(),
                                                          exit_code=HealthTask.BAD_LOGSTASH_CODE)
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # Check node state
        self.assertEqual(node._state, Node.DEGRADED)
        self.assertTrue(NodeConditions.BAD_LOGSTASH_ERR.name in node._conditions._active_errors)

    def test_handle_failed_health_task_low_docker_space(self):
        """Tests handling a failed health task where Docker has low disk space"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))

        # Fail task with low Docker space exit code
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now(),
                                                          exit_code=HealthTask.LOW_DOCKER_SPACE_CODE)
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # Check node state
        self.assertEqual(node._state, Node.DEGRADED)
        self.assertTrue(NodeConditions.LOW_DOCKER_SPACE_ERR.name in node._conditions._active_errors)

    def test_handle_successful_health_task(self):
        """Tests handling the health task successfully"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        self.assertEqual(task.agent_id, self.node_agent)

        # Schedule health task and make sure no new task is ready
        self.task_mgr.launch_tasks([task], now())
        self.assertListEqual([], node.get_next_tasks(when))
        self.assertTrue(node._conditions.is_health_check_normal)

        # Complete health task, verify no new task
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FINISHED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        self.assertListEqual([], node.get_next_tasks(when))
        self.assertTrue(node._conditions.is_health_check_normal)

    def test_handle_killed_health_task(self):
        """Tests handling killed health task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        task_1_id = task.id

        # Kill task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.KILLED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(node._conditions.is_health_check_normal)

    def test_handle_lost_health_task(self):
        """Tests handling lost health task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._initial_cleanup_completed()
        node._image_pull_completed()
        node._update_state()

        # Get health task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        task_1_id = task.id
        self.assertIsNotNone(task)

        # Lose task without scheduling and get different task next time
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(node._conditions.is_health_check_normal)

        # Lose task with scheduling and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(node._conditions.is_health_check_normal)

        # Lose task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(HEALTH_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(node._conditions.is_health_check_normal)

    def test_handle_failed_pull_task(self):
        """Tests handling failed Docker pull task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._update_state()

        # Get Docker pull task
        task = node.get_next_tasks(when)[0]
        task_1_id = task.id
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))

        # Fail task after running
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)

        # No new pull task right away
        tasks = node.get_next_tasks(when + datetime.timedelta(seconds=5))
        self.assertListEqual([], tasks)
        self.assertFalse(node._is_image_pulled)

        # After error threshold, we should get new pull task
        new_time = when + Node.IMAGE_PULL_ERR_THRESHOLD + datetime.timedelta(seconds=5)
        node._last_heath_task = new_time  # Get rid of health check task
        task = node.get_next_tasks(new_time)[0]
        self.assertNotEqual(task.id, task_1_id)
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))

    def test_handle_successful_pull_task(self):
        """Tests handling the Docker pull task successfully"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._update_state()

        # Get Docker pull task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        self.assertEqual(task.agent_id, self.node_agent)

        # Schedule pull task and make sure no new task is ready
        self.task_mgr.launch_tasks([task], now())
        self.assertListEqual([], node.get_next_tasks(when))
        self.assertFalse(node._is_image_pulled)

        # Complete pull task, verify no new task
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FINISHED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        self.assertListEqual([], node.get_next_tasks(when))
        self.assertTrue(node._is_image_pulled)
        # Node should now be ready
        self.assertEqual(node._state, Node.READY)

    def test_handle_killed_pull_task(self):
        """Tests handling killed pull task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._update_state()

        # Get pull task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        task_1_id = task.id

        # Kill task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.KILLED, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_image_pulled)

    def test_handle_lost_pull_task(self):
        """Tests handling lost pull task"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._update_state()

        # Get pull task
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        task_1_id = task.id
        self.assertIsNotNone(task)

        # Lose task without scheduling and get different task next time
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_image_pulled)

        # Lose task with scheduling and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_image_pulled)

        # Lose task after running and get different task next time
        self.task_mgr.launch_tasks([task], now())
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())
        self.task_mgr.handle_task_update(update)
        node.handle_task_update(update)
        task = node.get_next_tasks(when)[0]
        self.assertTrue(task.id.startswith(PULL_TASK_ID_PREFIX))
        self.assertNotEqual(task.id, task_1_id)
        self.assertFalse(node._is_image_pulled)

    def test_paused_node_pull_task(self):
        """Tests not returning pull task when its node is paused"""
        when = now()
        paused_node = node_test_utils.create_node(hostname='host_1_paused', slave_id='agent_paused')
        paused_node.is_paused = True
        node = Node('agent_paused', paused_node, self.scheduler)
        node._last_heath_task = when
        node._initial_cleanup_completed()
        node._update_state()
        tasks = node.get_next_tasks(when)

        # No task due to paused node
        self.assertListEqual([], tasks)

    def test_node_that_is_not_cleaned_yet_no_pull_task(self):
        """Tests not returning pull task when the node hasn't been cleaned up yet"""
        when = now()
        node = Node(self.node_agent, self.node, self.scheduler)
        tasks = node.get_next_tasks(when)

        # No pull task due to node not cleaned yet
        for task in tasks:
            self.assertFalse(task.id.startswith(PULL_TASK_ID_PREFIX))
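The suite above repeatedly exercises one scheduling rule: after a task fails, the node hands out no replacement until an error threshold has elapsed. A standalone sketch of that rule (plain Python, no scale imports; all names are illustrative, not the project's API):

    import datetime

    ERR_THRESHOLD = datetime.timedelta(minutes=5)  # stand-in for Node.*_ERR_THRESHOLD

    def replacement_task_due(failed_at, when):
        """True once enough time has passed since the last failure."""
        return failed_at is None or (when - failed_at) > ERR_THRESHOLD

    failed_at = datetime.datetime(2024, 1, 1, 12, 0, 0)
    # Right after a failure, no replacement task is issued...
    assert not replacement_task_due(failed_at, failed_at + datetime.timedelta(seconds=5))
    # ...but once the threshold has passed, a new task becomes due.
    assert replacement_task_due(failed_at, failed_at + ERR_THRESHOLD + datetime.timedelta(seconds=5))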
46.745455
118
0.692196
3,748
28,281
4.907417
0.047759
0.042734
0.063502
0.087316
0.867613
0.838526
0.828522
0.80846
0.799815
0.778068
0
0.002977
0.216046
28,281
604
119
46.822848
0.82662
0.109084
0
0.80137
0
0
0.011383
0.001158
0
0
0
0
0.226027
1
0.047945
false
0
0.043379
0
0.093607
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
68caffdd1ffd981ee3f1cddb8682094b221647ab
176
py
Python
planar_ising/decomp_ising/__init__.py
ValeryTyumen/planar_ising
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
[ "MIT" ]
8
2019-05-02T20:27:21.000Z
2020-11-01T20:41:38.000Z
planar_ising/decomp_ising/__init__.py
ValeryTyumen/planar_ising
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
[ "MIT" ]
1
2019-09-03T18:15:53.000Z
2019-09-06T16:41:12.000Z
planar_ising/decomp_ising/__init__.py
ValeryTyumen/planar_ising
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
[ "MIT" ]
3
2019-08-11T23:08:58.000Z
2022-03-19T09:09:50.000Z
from .decomp_graph import DecompGraph
from .decomp_inference_and_sampling import DecompInferenceAndSampling
from .small_inference_and_sampling import SmallInferenceAndSampling
44
69
0.914773
19
176
8.105263
0.578947
0.12987
0.25974
0.337662
0
0
0
0
0
0
0
0
0.068182
176
3
70
58.666667
0.939024
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
ec44ca0e1ccdfd5fc59ca4f8988105c4f3455e80
4,349
py
Python
src/users/migrations/0001_initial.py
raghuraju/Simple-Project-Management
fd7ab82a86bd5f1f7e6d389d4a0ee8de51b845ab
[ "Apache-2.0" ]
null
null
null
src/users/migrations/0001_initial.py
raghuraju/Simple-Project-Management
fd7ab82a86bd5f1f7e6d389d4a0ee8de51b845ab
[ "Apache-2.0" ]
1
2017-04-15T03:48:04.000Z
2017-04-15T03:48:04.000Z
src/users/migrations/0001_initial.py
raghuraju/Simple-Project-Management
fd7ab82a86bd5f1f7e6d389d4a0ee8de51b845ab
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-14 17:33
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Designer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=40)),
                ('last_name', models.CharField(max_length=40)),
                ('date_of_birth', models.DateField()),
                ('joined_on', models.DateField()),
                ('active', models.BooleanField(default=True)),
                ('email', models.EmailField(max_length=256)),
                ('employed_since', models.DateField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Developer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=40)),
                ('last_name', models.CharField(max_length=40)),
                ('date_of_birth', models.DateField()),
                ('joined_on', models.DateField()),
                ('active', models.BooleanField(default=True)),
                ('email', models.EmailField(max_length=256)),
                ('employed_since', models.DateField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Manager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=40)),
                ('last_name', models.CharField(max_length=40)),
                ('date_of_birth', models.DateField()),
                ('joined_on', models.DateField()),
                ('active', models.BooleanField(default=True)),
                ('email', models.EmailField(max_length=256)),
                ('employed_since', models.DateField()),
                ('designation', models.CharField(choices=[(0, 'PROJECT'), (1, 'SENIOR'), (2, 'EXECUTIVE')], max_length=3)),
                ('reports_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Manager')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('incharge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Manager')),
            ],
        ),
        migrations.CreateModel(
            name='Tester',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=40)),
                ('last_name', models.CharField(max_length=40)),
                ('date_of_birth', models.DateField()),
                ('joined_on', models.DateField()),
                ('active', models.BooleanField(default=True)),
                ('email', models.EmailField(max_length=256)),
                ('employed_since', models.DateField()),
                ('automation', models.BooleanField(default=False)),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Team')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='developer',
            name='team',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Team'),
        ),
        migrations.AddField(
            model_name='designer',
            name='team',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Team'),
        ),
    ]
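A hedged reconstruction (not from the repository) of the models.py that makemigrations would turn into the migration above; the abstract Employee base class is an inference from the repeated field set and the 'abstract': False options entries Django emits for models inheriting from an abstract parent:

    from django.db import models

    class Employee(models.Model):
        # Shared fields that appear verbatim in every CreateModel above.
        first_name = models.CharField(max_length=40)
        last_name = models.CharField(max_length=40)
        date_of_birth = models.DateField()
        joined_on = models.DateField()
        active = models.BooleanField(default=True)
        email = models.EmailField(max_length=256)
        employed_since = models.DateField()

        class Meta:
            abstract = True

    class Manager(Employee):
        designation = models.CharField(max_length=3,
                                       choices=[(0, 'PROJECT'), (1, 'SENIOR'), (2, 'EXECUTIVE')])
        reports_to = models.ForeignKey('self', on_delete=models.CASCADE)

    class Team(models.Model):
        name = models.CharField(max_length=30)
        incharge = models.ForeignKey(Manager, on_delete=models.CASCADE)

    class Designer(Employee):
        team = models.ForeignKey(Team, on_delete=models.CASCADE)

    class Developer(Employee):
        team = models.ForeignKey(Team, on_delete=models.CASCADE)

    class Tester(Employee):
        automation = models.BooleanField(default=False)
        team = models.ForeignKey(Team, on_delete=models.CASCADE)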
41.817308
123
0.538285
401
4,349
5.673317
0.216958
0.055385
0.075165
0.087033
0.776264
0.763956
0.744176
0.744176
0.744176
0.744176
0
0.016762
0.314095
4,349
103
124
42.223301
0.745893
0.015176
0
0.726316
1
0
0.116822
0
0
0
0
0
0
1
0
false
0
0.031579
0
0.073684
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
6b588b5a0a7880eebca65bd76006430a81918205
142
py
Python
app/api/__init__.py
RomanBachaloSigmaSoftware/sample-app-mysure-python
51d10ad3382996c429c73e1823c9e1df5d599207
[ "MIT" ]
4
2020-12-03T23:28:48.000Z
2021-11-14T17:05:49.000Z
app/api/__init__.py
RomanBachaloSigmaSoftware/sample-app-mysure-python
51d10ad3382996c429c73e1823c9e1df5d599207
[ "MIT" ]
8
2020-12-15T20:11:56.000Z
2022-03-10T03:41:01.000Z
app/api/__init__.py
RomanBachaloSigmaSoftware/sample-app-mysure-python
51d10ad3382996c429c73e1823c9e1df5d599207
[ "MIT" ]
8
2020-05-17T14:31:16.000Z
2022-03-09T12:27:14.000Z
from app.api.clickwrap import clickwrap
from app.api.common import common
from app.api.requests import requests
from app.api.auth import auth
28.4
39
0.830986
24
142
4.916667
0.333333
0.237288
0.338983
0
0
0
0
0
0
0
0
0
0.112676
142
4
40
35.5
0.936508
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
6b6c385e07c97b38425622a73e909c45610f3350
2,995
py
Python
server/processes/migrations/0103_auto_20200418_2054.py
CloudReactor/task_manager
464ca74371064fabb9a21b1f5bacba30360932ab
[ "Fair" ]
null
null
null
server/processes/migrations/0103_auto_20200418_2054.py
CloudReactor/task_manager
464ca74371064fabb9a21b1f5bacba30360932ab
[ "Fair" ]
6
2021-11-01T01:35:40.000Z
2022-02-11T03:33:06.000Z
server/processes/migrations/0103_auto_20200418_2054.py
CloudReactor/task_manager
464ca74371064fabb9a21b1f5bacba30360932ab
[ "Fair" ]
null
null
null
# Generated by Django 2.2.12 on 2020-04-18 20:54

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('processes', '0102_processexecution_heartbeat_interval_seconds'),
    ]

    operations = [
        migrations.AddField(
            model_name='processexecution',
            name='api_max_retries',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='api_max_retries_for_final_update',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='api_max_retries_for_process_creation_conflict',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='api_retry_delay_seconds',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='api_timeout_seconds',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='command',
            field=models.CharField(blank=True, max_length=5000),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='deployment',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='is_service',
            field=models.BooleanField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='is_status_listener_enabled',
            field=models.BooleanField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='max_conflicting_age_seconds',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='process_max_concurrency',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='process_max_retries',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='process_retry_delay_seconds',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='processexecution',
            name='process_timeout_seconds',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
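For reference, each AddField above maps one-to-one onto a field declared on the ProcessExecution model; a hedged, illustrative fragment (not the project's actual models.py):

    from django.db import models

    class ProcessExecution(models.Model):  # illustrative fragment only
        api_max_retries = models.IntegerField(blank=True, null=True)
        api_retry_delay_seconds = models.PositiveIntegerField(blank=True, null=True)
        command = models.CharField(blank=True, max_length=5000)
        is_service = models.BooleanField(blank=True, null=True)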
35.654762
75
0.580968
262
2,995
6.435115
0.236641
0.149466
0.190985
0.224199
0.827402
0.827402
0.799526
0.724199
0.724199
0.658363
0
0.013196
0.316861
2,995
83
76
36.084337
0.81085
0.015359
0
0.701299
1
0
0.204958
0.09567
0
0
0
0
0
1
0
false
0
0.012987
0
0.051948
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
2e24a2219bbb9d98aaaf627353bbd9dd57685acc
118
py
Python
test/__init__.py
jesopo/scpl
1fa5acfb468ab212276781fa1760bb5eda438c23
[ "MIT" ]
null
null
null
test/__init__.py
jesopo/scpl
1fa5acfb468ab212276781fa1760bb5eda438c23
[ "MIT" ]
2
2021-11-15T11:12:14.000Z
2021-11-15T17:35:27.000Z
test/__init__.py
jesopo/scpl
1fa5acfb468ab212276781fa1760bb5eda438c23
[ "MIT" ]
null
null
null
from .lexer import *
from .eval import *
from .parser import *
from .parser_operators import *
from .regex import *
14.75
31
0.728814
16
118
5.3125
0.4375
0.470588
0.376471
0
0
0
0
0
0
0
0
0
0.186441
118
7
32
16.857143
0.885417
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2e43778eb12b90847605bcf4dedc7c85859ff5f3
12,414
py
Python
tests/unit/pypyr/utils/poll_test.py
FooBarQuaxx/pypyr
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
[ "Apache-2.0" ]
null
null
null
tests/unit/pypyr/utils/poll_test.py
FooBarQuaxx/pypyr
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
[ "Apache-2.0" ]
null
null
null
tests/unit/pypyr/utils/poll_test.py
FooBarQuaxx/pypyr
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
[ "Apache-2.0" ]
null
null
null
"""poll.py unit tests.""" import logging from unittest.mock import call, MagicMock, patch import pypyr.utils.poll as poll # ----------------- wait_until_true ------------------------------------------- from tests.common.utils import patch_logger @patch('time.sleep') def test_wait_until_true_with_static_decorator(mock_time_sleep): """wait_until_true with static decorator.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'expected value', 'test string 5' ] @poll.wait_until_true(interval=0.01, max_attempts=10) def decorate_me(arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' if mock(arg1) == 'expected value': return True else: return False assert decorate_me('v1', 'v2') assert mock.call_count == 4 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 3 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_wait_until_true_invoke_inline(mock_time_sleep): """wait_until_true with dynamic invocation.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'expected value', 'test string 5' ] def decorate_me(arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' if mock(arg1) == 'expected value': return True else: return False assert poll.wait_until_true(interval=0.01, max_attempts=10)( decorate_me)('v1', 'v2') assert mock.call_count == 4 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 3 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_wait_until_true_with_timeout(mock_time_sleep): """wait_until_true with dynamic invocation, exhaust wait attempts.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'test string 4', 'test string 5', 'test string 6', 'test string 7', 'test string 8', 'test string 9', 'test string 10', 'test string 11', ] def decorate_me(arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' if mock(arg1) == 'expected value': return True else: return False assert not poll.wait_until_true(interval=0.01, max_attempts=10)( decorate_me)('v1', 'v2') assert mock.call_count == 10 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 9 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_wait_until_true_once_not_found(mock_time_sleep): """wait_until_true max_attempts 1.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', ] def decorate_me(arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' if mock(arg1) == 'expected value': return True else: return False assert not poll.wait_until_true(interval=0.01, max_attempts=1)( decorate_me)('v1', 'v2') mock.assert_called_once_with('v1') mock_time_sleep.assert_not_called() @patch('time.sleep') def test_wait_until_true_once_found(mock_time_sleep): """wait_until_true max_attempts 1.""" mock = MagicMock() mock.side_effect = [ 'expected value', 'test string 2', ] def decorate_me(arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' if mock(arg1) == 'expected value': return True else: return False assert poll.wait_until_true(interval=0.01, max_attempts=1)( decorate_me)('v1', 'v2') mock.assert_called_once_with('v1') mock_time_sleep.assert_not_called() # ----------------- wait_until_true ------------------------------------------- # ----------------- while_until_true ------------------------------------- @patch('time.sleep') def 
test_while_until_true_with_static_decorator(mock_time_sleep): """while_until_true with static decorator.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'expected value', 'test string 5' ] actual_counter = 0 @poll.while_until_true(interval=0.01, max_attempts=10) def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter if mock(arg1) == 'expected value': return True else: return False assert decorate_me('v1', 'v2') assert mock.call_count == 4 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 3 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_while_until_true_invoke_inline(mock_time_sleep): """while_until_true with dynamic invocation.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'expected value', 'test string 5' ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter if mock(arg1) == 'expected value': return True else: return False assert poll.while_until_true(interval=0.01, max_attempts=10)( decorate_me)('v1', 'v2') assert mock.call_count == 4 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 3 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_while_until_true_with_exhaust(mock_time_sleep): """while_until_true with dynamic invocation, exhaust wait attempts.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'test string 4', 'test string 5', 'test string 6', 'test string 7', 'test string 8', 'test string 9', 'test string 10', 'test string 11', ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter out = mock(arg1) assert out == f'test string {counter}' if out == 'expected value': return True else: return False assert not poll.while_until_true(interval=0.01, max_attempts=10)( decorate_me)('v1', 'v2') assert mock.call_count == 10 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 9 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_while_until_true_once_not_found(mock_time_sleep): """while_until_true max_attempts 1.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter if mock(arg1) == 'expected value': return True else: return False assert not poll.while_until_true(interval=0.01, max_attempts=1)( decorate_me)('v1', 'v2') mock.assert_called_once_with('v1') mock_time_sleep.assert_not_called() @patch('time.sleep') def test_while_until_true_once_found(mock_time_sleep): """wait_until_true max_attempts 1.""" mock = MagicMock() mock.side_effect = [ 'expected value', 'test string 2', ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter if mock(arg1) == 'expected value': return True else: return False assert 
poll.while_until_true(interval=0.01, max_attempts=1)( decorate_me)('v1', 'v2') mock.assert_called_once_with('v1') mock_time_sleep.assert_not_called() @patch('time.sleep') def test_while_until_true_no_max(mock_time_sleep): """while_until_true with dynamic invocation, infinite (max is None).""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', 'test string 4', 'test string 5', 'test string 6', 'test string 7', 'test string 8', 'test string 9', 'test string 10', 'test string 11', ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter out = mock(arg1) assert out == f'test string {counter}' if out == 'test string 11': return True else: return False with patch_logger('pypyr.utils.poll', logging.DEBUG) as mock_logger_debug: assert (poll.while_until_true(interval=0.01, max_attempts=None)(decorate_me)('v1', 'v2')) assert mock_logger_debug.mock_calls == [ call('started'), call('Looping every 0.01 seconds.'), call('iteration 1. Still waiting. . .'), call('iteration 2. Still waiting. . .'), call('iteration 3. Still waiting. . .'), call('iteration 4. Still waiting. . .'), call('iteration 5. Still waiting. . .'), call('iteration 6. Still waiting. . .'), call('iteration 7. Still waiting. . .'), call('iteration 8. Still waiting. . .'), call('iteration 9. Still waiting. . .'), call('iteration 10. Still waiting. . .'), call('iteration 11. Desired state reached.'), call('done')] assert mock.call_count == 11 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 10 mock_time_sleep.assert_called_with(0.01) @patch('time.sleep') def test_while_until_true_max_exhaust(mock_time_sleep): """while_until_true with dynamic invocation, exhaust max.""" mock = MagicMock() mock.side_effect = [ 'test string 1', 'test string 2', 'test string 3', ] actual_counter = 0 def decorate_me(counter, arg1, arg2): """Test static decorator syntax.""" assert arg1 == 'v1' assert arg2 == 'v2' nonlocal actual_counter actual_counter += 1 assert actual_counter == counter out = mock(arg1) assert out == f'test string {counter}' return False with patch_logger('pypyr.utils.poll', logging.DEBUG) as mock_logger_debug: assert not (poll.while_until_true(interval=0.01, max_attempts=3)(decorate_me)('v1', 'v2')) assert mock_logger_debug.mock_calls == [ call('started'), call('Looping every 0.01 seconds for 3 attempts'), call('iteration 1. Still waiting. . .'), call('iteration 2. Still waiting. . .'), call('iteration 3. Max attempts exhausted.'), call('done')] assert mock.call_count == 3 mock.assert_called_with('v1') assert mock_time_sleep.call_count == 2 mock_time_sleep.assert_called_with(0.01) # ----------------- while_until_true -------------------------------------
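A hedged usage sketch of the decorator API the tests above exercise, assuming pypyr is installed; the status iterator is a stand-in for real work:

    import pypyr.utils.poll as poll

    _status = iter(['pending', 'pending', 'done'])

    @poll.wait_until_true(interval=0.01, max_attempts=10)
    def job_is_done():
        # wait_until_true retries this callable until it returns True or
        # max_attempts is exhausted, sleeping `interval` seconds between tries.
        return next(_status) == 'done'

    assert job_is_done()

    # while_until_true additionally passes the 1-based attempt counter as the
    # first argument, as the tests assert via actual_counter.
    @poll.while_until_true(interval=0.01, max_attempts=3)
    def report(counter):
        print(f'attempt {counter}')
        return counter == 3

    assert report()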
29.278302
79
0.582971
1,508
12,414
4.570292
0.06366
0.089959
0.06036
0.0296
0.926872
0.92484
0.91628
0.911492
0.895241
0.866657
0
0.036561
0.286129
12,414
423
80
29.347518
0.741142
0.098518
0
0.81982
0
0
0.168866
0
0
0
0
0
0.264264
1
0.072072
false
0
0.012012
0
0.153153
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2e43fc1be571c9fed0404d0b65d021a29b3ee8a0
18,127
py
Python
mlearner/preprocessing/replace_na.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
6
2020-04-16T22:36:14.000Z
2020-04-25T14:34:47.000Z
mlearner/preprocessing/replace_na.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
9
2020-04-16T18:25:37.000Z
2020-05-03T17:24:36.000Z
mlearner/preprocessing/replace_na.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
1
2020-04-18T17:29:42.000Z
2020-04-18T17:29:42.000Z
"""Jaime Sendra Berenguer-2020. MLearner Machine Learning Library Extensions Author:Jaime Sendra Berenguer<www.linkedin.com/in/jaisenbe> License: MIT """ import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin class FillNaTransformer_median(BaseEstimator, TransformerMixin): """This transformer handles missing values. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_median/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) self.train_median = X[self.columns].median() return self def transform(self, X): """this transformer handles missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not hasattr(self, "train_median"): raise AttributeError("FillNaTransformer_median has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X[self.columns] = X[self.columns].fillna(self.train_median) return X class FillNaTransformer_mean(BaseEstimator, TransformerMixin): """This transformer handles missing values. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_mean/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) self.train_mean = X[self.columns].mean() return self def transform(self, X): """this transformer handles missing values. 
Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not hasattr(self, "train_mean"): raise AttributeError("FillNaTransformer_median has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X[self.columns] = X[self.columns].fillna(self.train_mean) return X class FillNaTransformer_idmax(BaseEstimator, TransformerMixin): """This transformer handles missing values for idmax. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_idmax/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if self.columns is None: # self.columns = X.select_dtypes(exclude=["object"]).columnsX.select_dtypes(exclude=["object"]).columns self.columns = X.columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) self._fitted = True return self def transform(self, X): """this transformer handles missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not hasattr(self, "_fitted"): raise AttributeError("FillNaTransformer_idmax has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) for col in self.columns: X[col] = X[col].fillna(X[col].value_counts().idxmax()) return X class FillNaTransformer_any(BaseEstimator, TransformerMixin): """This transformer delete row that there is some NaN. Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_any/ """ def __init__(self): """Init replace missing values.""" def fit(self, X, y=None, **fit_params): """Not implemented. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ return self def transform(self, X): """This transformer delete row that there is some NaN Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. 
""" if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) self.columns = X.columns X.dropna(axis=0, how="any", inplace=True) # X.reset_index() # X.drop(["index"], axis=1) return X class FillNaTransformer_all(BaseEstimator, TransformerMixin): """This transformer delete row that there is all NaN. Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_all/ """ def __init__(self): """Init replace missing values.""" def fit(self, X, y=None, **fit_params): """Not implemented. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ return self def transform(self, X): """This transformer delete row that there is some NaN Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) self.columns = X.columns X.dropna(axis=0, how="all", inplace=True) # X.reset_index() # X.drop(["index"], axis=1, inplace=True) return X class FillNaTransformer_value(BaseEstimator, TransformerMixin): """This transformer handles missing values. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_value/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, value=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if value is not None: if (not isinstance(value, bool) and not isinstance(value, int) and not isinstance(value, float) and not isinstance(value, str)): raise NameError("Type value not permited: {}".format(type(value))) else: self.value = value else: raise NameError("Not alowed value=None") if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) return self def transform(self, X): """this transformer handles missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. 
""" if not hasattr(self, "value"): raise AttributeError("FillNaTransformer_value has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X[self.columns] = X[self.columns].fillna(self.value) return X class FillNaTransformer_backward(BaseEstimator, TransformerMixin): """This transformer handles missing values closer backward. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_backward/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if self.columns is None: self.columns = X.columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) self._fitted = True return self def transform(self, X): """this transformer handles missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not hasattr(self, "_fitted"): raise AttributeError("FillNaTransformer_backward has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X_transform = X.copy() X_transform[self.columns] = X[self.columns].fillna(method="bfill") return X_transform class FillNaTransformer_forward(BaseEstimator, TransformerMixin): """This transformer handles missing values closer forward. Attributes ---------- columns: list of columns to transformer [n_columns] Examples -------- For usage examples, please see https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FillNaTransformer_forward/ """ def __init__(self, columns=None): """Init replace missing values.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise NameError("Invalid type {}".format(type(columns))) else: self.columns = columns def fit(self, X, y=None, **fit_params): """Gets the columns to make a replace missing values. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) if self.columns is None: self.columns = X.columns _lista = [i for i in self.columns if i not in X.columns.tolist()] if len(_lista) > 0: raise NameError("The columns {} no exist in Dataframe".format(_lista)) self._fitted = True return self def transform(self, X): """this transformer handles missing values. 
Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {Dataframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns replaced. """ if not hasattr(self, "_fitted"): raise AttributeError("FillNaTransformer_backward has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X_transform = X.copy() X_transform[self.columns] = X[self.columns].fillna(method="ffill") return X_transform
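A minimal usage sketch for the transformers above. The toy DataFrame and the direct class references are illustrative assumptions (in the packaged library these classes would be imported from its preprocessing module), not part of the original source:

import numpy as np
import pandas as pd

def toy():
    # Toy frame with missing values (illustrative data, not from the source).
    return pd.DataFrame({"a": [1.0, np.nan, 3.0],
                         "b": [np.nan, 2.0, 2.0]})

# Median fill: the statistics are computed in fit() and reused in transform(),
# so unseen data is filled with the *training* medians.
filled_median = FillNaTransformer_median(columns=["a", "b"]).fit(toy()).transform(toy())

# Constant fill: note that the fill value is passed to fit(), not __init__().
filled_const = FillNaTransformer_value(columns=["a"]).fit(toy(), value=0.0).transform(toy())

# Forward fill returns a copy; the median/mean variants modify X in place.
filled_ffill = FillNaTransformer_forward(columns=["b"]).fit(toy()).transform(toy())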
30.619932
115
0.591218
2,081
18,127
5.05382
0.072081
0.054388
0.03347
0.039555
0.907103
0.90406
0.90406
0.897975
0.884663
0.872112
0
0.002353
0.29674
18,127
591
116
30.671743
0.822639
0.402273
0
0.824468
0
0
0.103426
0.015928
0
0
0
0
0
1
0.12766
false
0
0.010638
0
0.265957
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2e90cadea406e869651af5bcac5b45acadc3c7e1
160
py
Python
profit/__init__.py
manal44/profit
a05a2eb0a5a14c36edc46cadfccce3f43fcc1a4e
[ "MIT" ]
null
null
null
profit/__init__.py
manal44/profit
a05a2eb0a5a14c36edc46cadfccce3f43fcc1a4e
[ "MIT" ]
null
null
null
profit/__init__.py
manal44/profit
a05a2eb0a5a14c36edc46cadfccce3f43fcc1a4e
[ "MIT" ]
null
null
null
from profit.main import *
from profit.config import *
from profit.post import *
from profit.pre import *
from profit.run.run import *
from profit.util import *
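A short sketch of what this aggregated __init__.py enables; that an installed profit package exposes these submodules is taken from the imports above, while the inspection itself is an illustrative assumption:

import profit

# Because __init__.py star-imports every submodule, any public name defined in
# profit.main, profit.config, profit.post, profit.pre, profit.run.run or
# profit.util is reachable directly on the top-level package namespace:
print(sorted(name for name in dir(profit) if not name.startswith("_")))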
22.857143
28
0.76875
25
160
4.92
0.36
0.487805
0.650407
0
0
0
0
0
0
0
0
0
0.15
160
6
29
26.666667
0.904412
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
cf228b069fed563cbebff0acfabb39c59989e225
65,299
py
Python
infoblox_netmri/api/broker/v3_6_0/nios_grid_member_broker.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
infoblox_netmri/api/broker/v3_6_0/nios_grid_member_broker.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
infoblox_netmri/api/broker/v3_6_0/nios_grid_member_broker.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
from ..broker import Broker class NiosGridMemberBroker(Broker): controller = "nios_grid_members" def index(self, **kwargs): """Lists the available nios grid members. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which NIOS GridMember entry was collected. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which NIOS GridMember entry was collected. :type DeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberDeviceID: The internal NetMRI identifier of each device in the NIOS GridMember. :type GridMemberDeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberDeviceID: The internal NetMRI identifier of each device in the NIOS GridMember. :type GridMemberDeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberIPDotted: The management IP address of the switch, in dotted (or colon-delimited for IPv6) format. :type GridMemberIPDotted: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberIPDotted: The management IP address of the switch, in dotted (or colon-delimited for IPv6) format. :type GridMemberIPDotted: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the nios grid members as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of nios grid member methods. The listed methods will be called on each nios grid member returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` GridMemberStatusID :param sort: The data field(s) to use for sorting the output. Default is GridMemberStatusID. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each NiosGridMember. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return nios_grid_members: An array of the NiosGridMember objects that match the specified input criteria. :rtype nios_grid_members: Array of NiosGridMember """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def show(self, **kwargs): """Shows the details for the specified nios grid member. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of nios grid member methods. 
The listed methods will be called on each nios grid member returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return nios_grid_member: The nios grid member identified by the specified GridMemberStatusID. :rtype nios_grid_member: NiosGridMember """ return self.api_request(self._get_method_fullname("show"), kwargs) def search(self, **kwargs): """Lists the available nios grid members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. :type DataSourceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. :type DataSourceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which NIOS GridMember entry was collected. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which NIOS GridMember entry was collected. :type DeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberChangedCols: The fields that changed between this revision of the record and the previous revision. :type GridMemberChangedCols: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberChangedCols: The fields that changed between this revision of the record and the previous revision. :type GridMemberChangedCols: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberDeviceID: The internal NetMRI identifier of each device in the NIOS GridMember. :type GridMemberDeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberDeviceID: The internal NetMRI identifier of each device in the NIOS GridMember. :type GridMemberDeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberEndTime: The ending effective time of this record, or empty if still in effect. 
:type GridMemberEndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberEndTime: The ending effective time of this record, or empty if still in effect. :type GridMemberEndTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberFirstSeenTime: The timestamp of when NetMRI first discovered this interface in the NIOS GridMember. :type GridMemberFirstSeenTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberFirstSeenTime: The timestamp of when NetMRI first discovered this interface in the NIOS GridMember. :type GridMemberFirstSeenTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberIPDotted: The management IP address of the switch, in dotted (or colon-delimited for IPv6) format. :type GridMemberIPDotted: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberIPDotted: The management IP address of the switch, in dotted (or colon-delimited for IPv6) format. :type GridMemberIPDotted: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberIPNumeric: The numerical value of the GridMember. :type GridMemberIPNumeric: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberIPNumeric: The numerical value of the GridMember. :type GridMemberIPNumeric: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberLastRepTimeFromMaster: The last response time returned from master in the NIOS GridMember. :type GridMemberLastRepTimeFromMaster: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberLastRepTimeFromMaster: The last response time returned from master in the NIOS GridMember. :type GridMemberLastRepTimeFromMaster: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberLastRepTimeToMaster: The last response time sent to master of the NIOS grid member. :type GridMemberLastRepTimeToMaster: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberLastRepTimeToMaster: The last response time sent to master of the NIOS grid member. :type GridMemberLastRepTimeToMaster: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberQueueFromMaster: The grid member queue returned from master in the NIOS GridMember. :type GridMemberQueueFromMaster: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberQueueFromMaster: The grid member queue returned from master in the NIOS GridMember. :type GridMemberQueueFromMaster: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberQueueToMaster: The grid member queue sent to master in the NIOS GridMember.
:type GridMemberQueueToMaster: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberQueueToMaster: The grid member queue sent to master in the NIOS GridMember. :type GridMemberQueueToMaster: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberStartTime: The starting effective time of this record. :type GridMemberStartTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberStartTime: The starting effective time of this record. :type GridMemberStartTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberStatus: The status of the NIOS GridMember. :type GridMemberStatus: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberStatus: The status of the NIOS GridMember. :type GridMemberStatus: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GridMemberTimestamp: The date and time this record was collected or calculated. :type GridMemberTimestamp: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GridMemberTimestamp: The date and time this record was collected or calculated. :type GridMemberTimestamp: Array of DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the nios grid members as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of nios grid member methods. The listed methods will be called on each nios grid member returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` GridMemberStatusID :param sort: The data field(s) to use for sorting the output. Default is GridMemberStatusID. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each NiosGridMember. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against nios grid members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, GridMemberChangedCols, GridMemberDeviceID, GridMemberEndTime, GridMemberFirstSeenTime, GridMemberIPDotted, GridMemberIPNumeric, GridMemberLastRepTimeFromMaster, GridMemberLastRepTimeToMaster, GridMemberQueueFromMaster, GridMemberQueueToMaster, GridMemberStartTime, GridMemberStatus, GridMemberStatusID, GridMemberTimestamp. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. 
Note that this kind of filter may be costly and inefficient if not associated with database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return nios_grid_members: An array of the NiosGridMember objects that match the specified input criteria. :rtype nios_grid_members: Array of NiosGridMember """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available nios grid members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, GridMemberChangedCols, GridMemberDeviceID, GridMemberEndTime, GridMemberFirstSeenTime, GridMemberIPDotted, GridMemberIPNumeric, GridMemberLastRepTimeFromMaster, GridMemberLastRepTimeToMaster, GridMemberQueueFromMaster, GridMemberQueueToMaster, GridMemberStartTime, GridMemberStatus, GridMemberStatusID, GridMemberTimestamp. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified. :type val_f_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified. :type val_c_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which NIOS GridMember entry was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator.
That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified. :type val_f_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified. :type val_c_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberChangedCols: The operator to apply to the field GridMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberChangedCols: If op_GridMemberChangedCols is specified, the field named in this input will be compared to the value in GridMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberChangedCols must be specified if op_GridMemberChangedCols is specified. :type val_f_GridMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberChangedCols: If op_GridMemberChangedCols is specified, this value will be compared to the value in GridMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberChangedCols must be specified if op_GridMemberChangedCols is specified. :type val_c_GridMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberDeviceID: The operator to apply to the field GridMemberDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberDeviceID: The internal NetMRI identifier of each device in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberDeviceID: If op_GridMemberDeviceID is specified, the field named in this input will be compared to the value in GridMemberDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberDeviceID must be specified if op_GridMemberDeviceID is specified. 
:type val_f_GridMemberDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberDeviceID: If op_GridMemberDeviceID is specified, this value will be compared to the value in GridMemberDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberDeviceID must be specified if op_GridMemberDeviceID is specified. :type val_c_GridMemberDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberEndTime: The operator to apply to the field GridMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberEndTime: If op_GridMemberEndTime is specified, the field named in this input will be compared to the value in GridMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberEndTime must be specified if op_GridMemberEndTime is specified. :type val_f_GridMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberEndTime: If op_GridMemberEndTime is specified, this value will be compared to the value in GridMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberEndTime must be specified if op_GridMemberEndTime is specified. :type val_c_GridMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberFirstSeenTime: The operator to apply to the field GridMemberFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberFirstSeenTime: The timestamp of when NetMRI first discovered this interface in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberFirstSeenTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberFirstSeenTime: If op_GridMemberFirstSeenTime is specified, the field named in this input will be compared to the value in GridMemberFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberFirstSeenTime must be specified if op_GridMemberFirstSeenTime is specified. :type val_f_GridMemberFirstSeenTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberFirstSeenTime: If op_GridMemberFirstSeenTime is specified, this value will be compared to the value in GridMemberFirstSeenTime using the specified operator. 
The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberFirstSeenTime must be specified if op_GridMemberFirstSeenTime is specified. :type val_c_GridMemberFirstSeenTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberIPDotted: The operator to apply to the field GridMemberIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberIPDotted: The management IP address of the switch, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberIPDotted: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberIPDotted: If op_GridMemberIPDotted is specified, the field named in this input will be compared to the value in GridMemberIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberIPDotted must be specified if op_GridMemberIPDotted is specified. :type val_f_GridMemberIPDotted: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberIPDotted: If op_GridMemberIPDotted is specified, this value will be compared to the value in GridMemberIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberIPDotted must be specified if op_GridMemberIPDotted is specified. :type val_c_GridMemberIPDotted: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberIPNumeric: The operator to apply to the field GridMemberIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberIPNumeric: The numerical value of the GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberIPNumeric: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberIPNumeric: If op_GridMemberIPNumeric is specified, the field named in this input will be compared to the value in GridMemberIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberIPNumeric must be specified if op_GridMemberIPNumeric is specified. :type val_f_GridMemberIPNumeric: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberIPNumeric: If op_GridMemberIPNumeric is specified, this value will be compared to the value in GridMemberIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberIPNumeric must be specified if op_GridMemberIPNumeric is specified. 
:type val_c_GridMemberIPNumeric: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberLastRepTimeFromMaster: The operator to apply to the field GridMemberLastRepTimeFromMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberLastRepTimeFromMaster: The last response time returned from master in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberLastRepTimeFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberLastRepTimeFromMaster: If op_GridMemberLastRepTimeFromMaster is specified, the field named in this input will be compared to the value in GridMemberLastRepTimeFromMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberLastRepTimeFromMaster must be specified if op_GridMemberLastRepTimeFromMaster is specified. :type val_f_GridMemberLastRepTimeFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberLastRepTimeFromMaster: If op_GridMemberLastRepTimeFromMaster is specified, this value will be compared to the value in GridMemberLastRepTimeFromMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberLastRepTimeFromMaster must be specified if op_GridMemberLastRepTimeFromMaster is specified. :type val_c_GridMemberLastRepTimeFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberLastRepTimeToMaster: The operator to apply to the field GridMemberLastRepTimeToMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberLastRepTimeToMaster: The last response time sent to master of the NIOS grid member. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberLastRepTimeToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberLastRepTimeToMaster: If op_GridMemberLastRepTimeToMaster is specified, the field named in this input will be compared to the value in GridMemberLastRepTimeToMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberLastRepTimeToMaster must be specified if op_GridMemberLastRepTimeToMaster is specified. :type val_f_GridMemberLastRepTimeToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberLastRepTimeToMaster: If op_GridMemberLastRepTimeToMaster is specified, this value will be compared to the value in GridMemberLastRepTimeToMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberLastRepTimeToMaster must be specified if op_GridMemberLastRepTimeToMaster is specified. 
:type val_c_GridMemberLastRepTimeToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberQueueFromMaster: The operator to apply to the field GridMemberQueueFromMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberQueueFromMaster: The grid member queue returned from master in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberQueueFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberQueueFromMaster: If op_GridMemberQueueFromMaster is specified, the field named in this input will be compared to the value in GridMemberQueueFromMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberQueueFromMaster must be specified if op_GridMemberQueueFromMaster is specified. :type val_f_GridMemberQueueFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberQueueFromMaster: If op_GridMemberQueueFromMaster is specified, this value will be compared to the value in GridMemberQueueFromMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberQueueFromMaster must be specified if op_GridMemberQueueFromMaster is specified. :type val_c_GridMemberQueueFromMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberQueueToMaster: The operator to apply to the field GridMemberQueueToMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberQueueToMaster: The grid member queue sent to master in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberQueueToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberQueueToMaster: If op_GridMemberQueueToMaster is specified, the field named in this input will be compared to the value in GridMemberQueueToMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberQueueToMaster must be specified if op_GridMemberQueueToMaster is specified. :type val_f_GridMemberQueueToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberQueueToMaster: If op_GridMemberQueueToMaster is specified, this value will be compared to the value in GridMemberQueueToMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberQueueToMaster must be specified if op_GridMemberQueueToMaster is specified. :type val_c_GridMemberQueueToMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberStartTime: The operator to apply to the field GridMemberStartTime.
Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberStartTime: If op_GridMemberStartTime is specified, the field named in this input will be compared to the value in GridMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberStartTime must be specified if op_GridMemberStartTime is specified. :type val_f_GridMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberStartTime: If op_GridMemberStartTime is specified, this value will be compared to the value in GridMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberStartTime must be specified if op_GridMemberStartTime is specified. :type val_c_GridMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberStatus: The operator to apply to the field GridMemberStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberStatus: The status of the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberStatus: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberStatus: If op_GridMemberStatus is specified, the field named in this input will be compared to the value in GridMemberStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberStatus must be specified if op_GridMemberStatus is specified. :type val_f_GridMemberStatus: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberStatus: If op_GridMemberStatus is specified, this value will be compared to the value in GridMemberStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberStatus must be specified if op_GridMemberStatus is specified. :type val_c_GridMemberStatus: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberStatusID: The operator to apply to the field GridMemberStatusID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_GridMemberStatusID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberStatusID: If op_GridMemberStatusID is specified, the field named in this input will be compared to the value in GridMemberStatusID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberStatusID must be specified if op_GridMemberStatusID is specified. :type val_f_GridMemberStatusID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberStatusID: If op_GridMemberStatusID is specified, this value will be compared to the value in GridMemberStatusID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberStatusID must be specified if op_GridMemberStatusID is specified. :type val_c_GridMemberStatusID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GridMemberTimestamp: The operator to apply to the field GridMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GridMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GridMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GridMemberTimestamp: If op_GridMemberTimestamp is specified, the field named in this input will be compared to the value in GridMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GridMemberTimestamp must be specified if op_GridMemberTimestamp is specified. :type val_f_GridMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GridMemberTimestamp: If op_GridMemberTimestamp is specified, this value will be compared to the value in GridMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GridMemberTimestamp must be specified if op_GridMemberTimestamp is specified. :type val_c_GridMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the nios grid members as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of nios grid member methods. The listed methods will be called on each nios grid member returned and included in the output. Available methods are: device. 
:type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param sort: The data field(s) to use for sorting the output. Default is GridMemberStatusID. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` GridMemberStatusID :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param select: The list of attributes to return for each NiosGridMember. Valid values are DataSourceID, GridMemberStatusID, GridMemberStartTime, GridMemberEndTime, GridMemberChangedCols, GridMemberTimestamp, GridMemberFirstSeenTime, DeviceID, GridMemberDeviceID, GridMemberIPDotted, GridMemberIPNumeric, GridMemberStatus, GridMemberQueueFromMaster, GridMemberLastRepTimeFromMaster, GridMemberQueueToMaster, GridMemberLastRepTimeToMaster. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not paired with database-level filtering. 
:type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return nios_grid_members: An array of the NiosGridMember objects that match the specified input criteria. :rtype nios_grid_members: Array of NiosGridMember """ return self.api_list_request(self._get_method_fullname("find"), kwargs) def data_source(self, **kwargs): """The collector NetMRI that collected this data record. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return : The collector NetMRI that collected this data record. :rtype : DataSource """ return self.api_request(self._get_method_fullname("data_source"), kwargs) def device(self, **kwargs): """The device from which this data was collected. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return : The device from which this data was collected. :rtype : Device """ return self.api_request(self._get_method_fullname("device"), kwargs) def infradevice(self, **kwargs): """The device from which this data was collected. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param GridMemberStatusID: The internal NetMRI identifier of the status in the NIOS GridMember. :type GridMemberStatusID: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return : The device from which this data was collected. :rtype : InfraDevice """ return self.api_request(self._get_method_fullname("infradevice"), kwargs)
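A minimal usage sketch for the find method documented above, assuming the infoblox-netmri Python client; the host, the credentials, and the 'online' status value are illustrative placeholders, not values taken from this record:

from infoblox_netmri.client import InfobloxNetMRI

# Hypothetical connection details.
client = InfobloxNetMRI("netmri.example.com", "admin", "password")
broker = client.get_broker("NiosGridMember")

# Compare GridMemberStatus to a constant (val_c_...) with the '=' operator,
# returning the first page of 10 records sorted by GridMemberStatusID.
members = broker.find(
    op_GridMemberStatus="=",
    val_c_GridMemberStatus="online",  # assumed status value
    start=0,
    limit=10,
    sort=["GridMemberStatusID"],
    dir=["asc"],
)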
54.461218
762
0.626947
7,429
65,299
5.462512
0.041325
0.067519
0.043888
0.071561
0.949484
0.948695
0.914221
0.901506
0.893078
0.893078
0
0.004428
0.294537
65,299
1,199
763
54.461218
0.876503
0.826123
0
0
0
0
0.053827
0
0
0
0
0
0
1
0.411765
false
0
0.058824
0
1
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
8
cf2dc22cc4d04d198fe1745a55b448033abbfafa
205
py
Python
src/pretix/helpers/debug.py
alainrk/pretix
867a8132aa1ed73dd9513efae5b3c46b5bbae140
[ "ECL-2.0", "Apache-2.0" ]
1
2021-08-31T13:16:55.000Z
2021-08-31T13:16:55.000Z
src/pretix/helpers/debug.py
alainrk/pretix
867a8132aa1ed73dd9513efae5b3c46b5bbae140
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/pretix/helpers/debug.py
alainrk/pretix
867a8132aa1ed73dd9513efae5b3c46b5bbae140
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Shim combining Django's MiddlewareMixin with the debug toolbar's middleware,
# keeping DebugToolbarMiddleware compatible with pretix's middleware handling.
from debug_toolbar.middleware import DebugToolbarMiddleware
from django.utils.deprecation import MiddlewareMixin


class DebugMiddlewareCompatibilityShim(MiddlewareMixin, DebugToolbarMiddleware):
    pass
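A sketch of how a shim like this is presumably wired into Django settings; only the shim's dotted path comes from the file above, and the surrounding entry and its position in pretix's stack are assumptions:

# Hypothetical settings.py excerpt.
MIDDLEWARE = [
    "django.middleware.common.CommonMiddleware",
    # Used in place of the plain debug_toolbar middleware entry.
    "pretix.helpers.debug.DebugMiddlewareCompatibilityShim",
]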
29.285714
80
0.878049
17
205
10.529412
0.764706
0
0
0
0
0
0
0
0
0
0
0
0.087805
205
6
81
34.166667
0.957219
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
7
cf64e897f622f8f3a074fa029e115fc429591fcf
369
py
Python
mlflow/models/evaluation/__init__.py
devlibx/mlflowx
291c51161ec26450b1e79c8e4a32af960da79591
[ "Apache-2.0" ]
1
2022-01-07T05:51:52.000Z
2022-01-07T05:51:52.000Z
mlflow/models/evaluation/__init__.py
devlibx/mlflowx
291c51161ec26450b1e79c8e4a32af960da79591
[ "Apache-2.0" ]
null
null
null
mlflow/models/evaluation/__init__.py
devlibx/mlflowx
291c51161ec26450b1e79c8e4a32af960da79591
[ "Apache-2.0" ]
null
null
null
from mlflow.models.evaluation.base import (
    ModelEvaluator,
    EvaluationDataset,
    EvaluationResult,
    EvaluationMetrics,
    EvaluationArtifact,
    evaluate,
    list_evaluators,
)

__all__ = [
    "ModelEvaluator",
    "EvaluationDataset",
    "EvaluationResult",
    "EvaluationMetrics",
    "EvaluationArtifact",
    "evaluate",
    "list_evaluators",
]
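A hedged sketch of how the re-exported API above is typically used; the model URI, the toy frame, and the exact keyword names follow the upstream MLflow evaluation API of that era and may vary by version:

import pandas as pd

from mlflow.models.evaluation import evaluate, list_evaluators

# List the registered evaluator plugins (the built-in one is named "default").
print(list_evaluators())

# Hypothetical call: the model URI and the columns are placeholders.
eval_data = pd.DataFrame({"feature": [0.1, 0.9, 0.4], "label": [0, 1, 0]})
result = evaluate(
    "models:/my-model/1",  # assumed model URI
    eval_data,
    targets="label",
    model_type="classifier",
    evaluators=["default"],
)
print(result.metrics)  # EvaluationResult exposes metrics and artifacts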
18.45
43
0.685637
23
369
10.73913
0.652174
0.251012
0.380567
0.518219
0.842105
0.842105
0.842105
0.842105
0
0
0
0
0.216802
369
19
44
19.421053
0.854671
0
0
0
0
0
0.284553
0
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
1
0
1
null
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
d8624a6e39889501aae808d4d055de567b890b3c
211
py
Python
primeiros exercicios/sen.cos.tan.py
Pedroluis1/python
d949fa2646c049aa51a41a32dc62de7b14eae90f
[ "MIT" ]
null
null
null
primeiros exercicios/sen.cos.tan.py
Pedroluis1/python
d949fa2646c049aa51a41a32dc62de7b14eae90f
[ "MIT" ]
null
null
null
primeiros exercicios/sen.cos.tan.py
Pedroluis1/python
d949fa2646c049aa51a41a32dc62de7b14eae90f
[ "MIT" ]
null
null
null
import math
an = float(input('Enter an angle: '))
print(f'The angle has a sine of {math.sin(math.radians(an)):.2f}, a cosine of {math.cos(math.radians(an)):.2f} and a tangent of {math.tan(math.radians(an)):.2f}')
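With an input of 30, for instance, the script prints a sine of 0.50, a cosine of 0.87 and a tangent of 0.58, since math.radians converts the degree input before each trigonometric call.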
70.333333
159
0.687204
41
211
3.536585
0.585366
0.227586
0.268966
0.310345
0
0
0
0
0
0
0
0.015957
0.109005
211
3
159
70.333333
0.755319
0
0
0
0
0.333333
0.787736
0.457547
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
1
0
0
0
0
8
d8d70a31c00da1605870c367d3ca161401bd6a43
8,044
py
Python
flatten_test.py
dcirne/flatten
0806eb9789d461cb6a0fdb6a12b7021e8debbf90
[ "Apache-2.0" ]
null
null
null
flatten_test.py
dcirne/flatten
0806eb9789d461cb6a0fdb6a12b7021e8debbf90
[ "Apache-2.0" ]
null
null
null
flatten_test.py
dcirne/flatten
0806eb9789d461cb6a0fdb6a12b7021e8debbf90
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Dalmo Cirne # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import flatten # Python module containing the C++ implementation import flatten_python # Pure Python implementation import json import pytest def test_flatten_python(): with open('sample.json', 'r') as json_file: json_data = json_file.read() with open('validation.json', 'r') as validation_file: validation_json = validation_file.read() flatten_arrays = False flatten_str = flatten_python.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 10) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == ["Non-Fiction", "Technology"]) assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42) assert(flatten_dict["genre"] == "Unknown") def test_flatten_cpp(): with open('sample.json', 'r') as json_file: json_data = json_file.read() with open('validation.json', 'r') as validation_file: validation_json = validation_file.read() flatten_arrays = False flatten_str = flatten.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 10) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == ["Non-Fiction", "Technology"]) assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42) assert(flatten_dict["genre"] == "Unknown") def test_flatten_python_including_arrays(): with open('sample.json', 'r') as json_file: json_data = json_file.read() with open('validation.json', 'r') as validation_file: validation_json = validation_file.read() flatten_arrays = True flatten_str = flatten_python.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 10) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == "Non-Fiction") assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42) assert(flatten_dict["genre"] == "Unknown") def test_flatten_cpp_including_arrays(): with open('sample.json', 'r') as json_file: json_data = 
json_file.read() with open('validation.json', 'r') as validation_file: validation_json = validation_file.read() flatten_arrays = True flatten_str = flatten.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 10) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == "Non-Fiction") assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42) assert(flatten_dict["genre"] == "Unknown") def test_invalid_json_python(): json_data = '{"invalid":' with pytest.raises(Exception): flatten_str = flatten_python.flatten_json(json_data, False, "") assert(len(flatten_str) == 0) assert(flatten_str == "") def test_invalid_json_cpp(): json_data = '{"invalid":' flatten_str = flatten.flatten_json(json_data, False, "") assert(len(flatten_str) == 0) assert(flatten_str == "") def test_empty_json_python(): json_data = "" with pytest.raises(Exception): flatten_str = flatten_python.flatten_json(json_data, False, "") assert(len(flatten_str) == 0) assert(flatten_str == "") def test_empty_json_cpp(): json_data = "" flatten_str = flatten.flatten_json(json_data, False, "") assert(len(flatten_str) == 0) assert(flatten_str == "") def test_invalid_validation_python(): with open('sample.json', 'r') as json_file: json_data = json_file.read() validation_json = '[{"name":"missing_field_with_no_default"}]' flatten_arrays = True flatten_str = flatten_python.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) assert(flatten_str == "") def test_invalid_validation_cpp(): with open('sample.json', 'r') as json_file: json_data = json_file.read() validation_json = '[{"name":"missing_field_with_no_default"}]' flatten_arrays = True flatten_str = flatten.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) assert(flatten_str == "") def test_empty_validation_python(): with open('sample.json', 'r') as json_file: json_data = json_file.read() validation_json = "" flatten_arrays = False flatten_str = flatten_python.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 9) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == ["Non-Fiction", "Technology"]) assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42) def test_empty_validation_cpp(): with open('sample.json', 'r') as json_file: json_data = json_file.read() validation_json = "" flatten_arrays = False flatten_str = flatten.flatten_json(json_data, flatten_arrays, validation_json) assert(flatten_str is not None) flatten_dict = json.loads(flatten_str) assert(len(flatten_dict) == 9) assert(flatten_dict["isbn"] == "123-456-222") assert(flatten_dict["author.lastname"] == "Doe") assert(flatten_dict["author.firstname"] == "Jane") assert(flatten_dict["editor.lastname"] == "Smith") 
assert(flatten_dict["editor.firstname"] == "Jane") assert(flatten_dict["title"] == "The Ultimate Book in the Universe") assert(flatten_dict["category"] == ["Non-Fiction", "Technology"]) assert(flatten_dict["price"] == 3.14) assert(flatten_dict["Number of pages"] == 42)
38.859903
89
0.691447
1,048
8,044
5.069656
0.124046
0.176172
0.185583
0.042914
0.874271
0.874271
0.87333
0.872012
0.872012
0.872012
0
0.015819
0.166957
8,044
206
90
39.048544
0.777048
0.077449
0
0.898734
0
0
0.18555
0.011344
0
0
0
0
0.518987
1
0.075949
false
0
0.025316
0
0.101266
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
2b14dc79b9108e19c222929eb02014d6730f5b90
448
py
Python
output/models/saxon_data/cta/cta0006_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/saxon_data/cta/cta0006_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/saxon_data/cta/cta0006_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from output.models.saxon_data.cta.cta0006_xsd.cta0006 import (
    DateMessageType,
    DecimalMessageType,
    IntMessageType,
    Message,
    MessageType,
    Messages,
    QNameMessageType,
    StringMessageType,
    TimeMessageType,
)

__all__ = [
    "DateMessageType",
    "DecimalMessageType",
    "IntMessageType",
    "Message",
    "MessageType",
    "Messages",
    "QNameMessageType",
    "StringMessageType",
    "TimeMessageType",
]
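Generated packages like this one re-export the bound classes so consumers can import them from the package root; a one-line sketch (the consuming code is hypothetical):

# Hypothetical consumer; relies on the re-exports above.
from output.models.saxon_data.cta.cta0006_xsd import Message, Messages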
18.666667
62
0.674107
29
448
10.206897
0.655172
0.222973
0.317568
0.364865
0.817568
0.817568
0.817568
0.817568
0.817568
0
0
0.022989
0.223214
448
23
63
19.478261
0.827586
0
0
0
0
0
0.270089
0
0
0
0
0
0
1
0
false
0
0.045455
0
0.045455
0
1
0
1
null
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
5111b7870e682c1db9ccf1fb8ffd180fe473ce6f
51,177
py
Python
application/src/pytest/python/modules/user_profiles/routes_admin_test.py
okebinda/base.api.python
fdf6dc02ab73d588919f38d6017788f7822cfd04
[ "Apache-2.0" ]
null
null
null
application/src/pytest/python/modules/user_profiles/routes_admin_test.py
okebinda/base.api.python
fdf6dc02ab73d588919f38d6017788f7822cfd04
[ "Apache-2.0" ]
2
2021-06-02T03:26:04.000Z
2021-09-30T03:04:00.000Z
application/src/pytest/python/modules/user_profiles/routes_admin_test.py
okebinda/base.api.python
fdf6dc02ab73d588919f38d6017788f7822cfd04
[ "Apache-2.0" ]
null
null
null
from copy import copy import re import base64 import pytest from werkzeug.exceptions import NotFound, Unauthorized from sqlalchemy.orm.exc import NoResultFound from fixtures import Fixtures from app import create_app from config import Config from modules.user_profiles.routes_admin import get_user_profiles, \ post_user_profiles, get_user_profile, put_user_profile, delete_user_profile from modules.user_profiles.model import UserProfile from modules.users.model import User from modules.app_keys.model import AppKey from modules.administrators.model import Administrator from modules.roles.model import Role @pytest.fixture def app(request): config = copy(Config) config.TESTING = True config.APP_TYPE = 'admin' if 'admin_api' in request.keywords else 'public' app = create_app(config) if 'unit' in request.keywords: yield app else: fixtures = Fixtures(app) fixtures.setup() yield app fixtures.teardown() # UNIT TESTS @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles(app, mocker): expected_status = 200 expected_length = 2 expected_json = { 'created_at': None, 'first_name': None, 'id': None, 'joined_at': None, 'last_name': None, 'status': None, 'status_changed_at': None, 'user_id': None, 'updated_at': None, } expected_limit = 10 expected_page = 1 expected_total = 2 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [UserProfile()] * expected_length query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = expected_total result = get_user_profiles() assert result[1] == expected_status assert len(result[0].json['user_profiles']) == expected_length assert result[0].json['user_profiles'][0] == expected_json assert result[0].json['limit'] == expected_limit assert result[0].json['page'] == expected_page assert result[0].json['total'] == expected_total @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_limit_10_page_2_of_3(app, mocker): expected_status = 200 expected_length = 10 expected_json = { 'created_at': None, 'first_name': None, 'id': None, 'joined_at': None, 'last_name': None, 'status': None, 'status_changed_at': None, 'user_id': None, 'updated_at': None, } expected_limit = 10 expected_page = 2 expected_total = 25 expected_previous_uri = 'http://localhost/user_profiles/1/10' expected_next_uri = 'http://localhost/user_profiles/3/10' query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [UserProfile()] * expected_length query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = expected_total result = get_user_profiles(expected_page, expected_limit) assert result[1] == expected_status assert len(result[0].json['user_profiles']) == expected_length assert result[0].json['user_profiles'][0] == expected_json assert result[0].json['previous_uri'] == expected_previous_uri assert result[0].json['next_uri'] == expected_next_uri assert result[0].json['limit'] == expected_limit assert result[0].json['page'] == expected_page assert result[0].json['total'] == expected_total @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_empty(app, mocker): expected_status = 204 expected_content = '' query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') 
query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [] query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = 15 result = get_user_profiles(5, 10) assert result[1] == expected_status assert result[0] == expected_content @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_route(app, mocker, client): expected_status = 200 expected_length = 10 expected_limit = 10 expected_page = 1 expected_total = 15 expected_next_uri = 'http://localhost/user_profiles/2/10' query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [UserProfile()] * expected_length query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = expected_total # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get("/user_profiles?app_key=123", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert len(response.json['user_profiles']) == expected_length assert response.json['limit'] == expected_limit assert response.json['page'] == expected_page assert response.json['total'] == expected_total assert response.json['next_uri'] == expected_next_uri @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_limit_5_page_2_of_3_route(app, mocker, client): expected_status = 200 expected_length = 5 expected_limit = 5 expected_page = 2 expected_total = 12 expected_next_uri = 'http://localhost/user_profiles/3/5' expected_previous_uri = 'http://localhost/user_profiles/1/5' query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [UserProfile()] * expected_length query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = expected_total # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 
'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get( "/user_profiles/{}/{}?app_key=123".format(expected_page, expected_limit), headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert len(response.json['user_profiles']) == expected_length assert response.json['limit'] == expected_limit assert response.json['page'] == expected_page assert response.json['total'] == expected_total assert response.json['next_uri'] == expected_next_uri assert response.json['previous_uri'] == expected_previous_uri @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_empty_route(app, mocker, client): expected_status = 204 expected_json = None query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .limit.return_value \ .offset.return_value \ .__iter__.return_value = [] query_mock.return_value \ .filter.return_value \ .order_by.return_value \ .count.return_value = 15 # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get("/user_profiles/3?app_key=123", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert response.json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_route_no_app_key(app, client): expected_status = 401 response = client.get("/user_profiles") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_route_bad_app_key(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.side_effect = NoResultFound() response = client.get("/user_profiles?app_key=BAD_KEY") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profiles_route_unauthorized(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login auth_mock = mocker.patch('modules.administrators.Authentication') auth_mock.verify_password.side_effect = Unauthorized() response = client.get("/user_profiles?app_key=123") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_ok(app, mocker): expected_status = 200 expected_json = { 'created_at': None, 'first_name': None, 'id': None, 'joined_at': None, 'last_name': None, 'status': None, 'status_changed_at': None, 'user_id': None, 
'updated_at': None, } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .get.return_value = UserProfile() result = get_user_profile(1) assert result[1] == expected_status assert result[0].json['user_profile'] == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_not_found(app, mocker): query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .get.return_value = None try: get_user_profile(250) assert False except NotFound: assert True @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_route_ok(app, mocker, client): expected_status = 200 # mock db query query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None # mock resource query query_mock.return_value \ .get.return_value = UserProfile() # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get("/user_profile/1?app_key=123", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert 'user_profile' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_route_no_app_key(app, client): expected_status = 401 response = client.get("/user_profile/1") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_route_bad_app_key(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.side_effect = NoResultFound() response = client.get("/user_profile/1?app_key=BAD_KEY") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_get_user_profile_route_unauthorized(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login auth_mock = mocker.patch('modules.administrators.Authentication') auth_mock.verify_password.side_effect = Unauthorized() response = client.get("/user_profile/1?app_key=123") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profile_ok(app, mocker): expected_status = 201 expected_m_length = 9 expected_m_id = None expected_m_user_id = 9 expected_m_first_name = "Service" expected_m_last_name = "Account" expected_m_joined_at = "2019-02-04T00:00:00+0000" expected_m_status = UserProfile.STATUS_ENABLED expected_m_created_at = None expected_m_updated_at = None # @todo: timezone re_datetime = 
re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$") request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock exists() validation user_9 = User() user_9.id = 9 query_mock.return_value \ .get.return_value = user_9 db_mock = mocker.patch('modules.user_profiles.routes_admin.db') db_mock.add.return_value = None db_mock.commit.return_value = None result = post_user_profiles() assert result[1] == expected_status assert 'user_profile' in result[0].json assert len(result[0].json['user_profile']) == expected_m_length assert result[0].json['user_profile']['id'] == expected_m_id assert result[0].json['user_profile']['user_id'] == expected_m_user_id assert result[0].json['user_profile']['first_name'] == \ expected_m_first_name assert result[0].json['user_profile']['last_name'] == expected_m_last_name assert result[0].json['user_profile']['joined_at'] == expected_m_joined_at assert result[0].json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( result[0].json['user_profile']['status_changed_at'])) assert result[0].json['user_profile']['created_at'] == \ expected_m_created_at assert result[0].json['user_profile']['updated_at'] == \ expected_m_updated_at @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_required_fail(app, mocker): expected_status = 400 expected_json = { 'error': { 'user_id': ['Missing data for required field.'], 'first_name': ['Missing data for required field.'], 'foo': ['Unknown field.'], 'joined_at': ['Missing data for required field.'], 'last_name': ['Missing data for required field.'], 'status': ['Missing data for required field.'], } } request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = {'foo': "bar"} result = post_user_profiles() assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_user_exists_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'user_id': ['Invalid value.']}} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 250, 'first_name': "Service", 'last_name': "Account", 'joined_at': "2019-02-04T00:00:00+0000", 'status': UserProfile.STATUS_ENABLED } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock exists() validation query_mock.return_value \ .get.return_value = None result = post_user_profiles() assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_min_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'first_name': ["Value must be between 1 and 40 characters long."], 'last_name': ["Value must be between 2 and 40 characters long."], }} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 9, 'first_name': "", 'last_name': "A", 'joined_at': "2019-02-04T00:00:00+0000", 'status': UserProfile.STATUS_ENABLED } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock exists() validation user_9 = User() user_9.id = 9 query_mock.return_value \ .get.return_value = user_9 result = post_user_profiles() assert result[1] == expected_status assert 
result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_max_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'first_name': ["Value must be between 1 and 40 characters long."], 'last_name': ["Value must be between 2 and 40 characters long."], }} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 9, 'first_name': "LRUNzhfsbfrfZ4BT9N6R3SNYVfAAuQdQdTSmrwFew", 'last_name': "z3Sytm4QrL8g7J4kgugEABnhwXZAnCZmrngUCeeXm", 'joined_at': "2019-02-04T00:00:00+0000", 'status': UserProfile.STATUS_ENABLED } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock exists() validation user_9 = User() user_9.id = 9 query_mock.return_value \ .get.return_value = user_9 result = post_user_profiles() assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_type_fail(app, mocker): expected_status = 400 expected_json = { "error": { 'user_id': ['Not a valid integer.'], 'first_name': ['Not a valid string.'], 'joined_at': ['Not a valid datetime.'], 'last_name': ['Not a valid string.'], 'status': ['Not a valid integer.'], } } request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 'bad', 'first_name': 123, 'joined_at': 123, 'last_name': 123, 'status': "bad", } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock exists() validation query_mock.return_value \ .get.return_value = None result = post_user_profiles() assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_route_ok(app, mocker, client): expected_status = 201 expected_m_length = 9 expected_m_id = None expected_m_user_id = 9 expected_m_first_name = "Service" expected_m_last_name = "Account" expected_m_joined_at = "2019-02-04T00:00:00+0000" expected_m_status = UserProfile.STATUS_ENABLED expected_m_created_at = None expected_m_updated_at = None # @todo: timezone re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$") data = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None # mock exists() validation user_9 = User() user_9.id = 9 query_mock.return_value \ .get.return_value = user_9 db_mock = mocker.patch('modules.user_profiles.routes_admin.db') db_mock.add.return_value = None db_mock.commit.return_value = None # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.post( "/user_profiles?app_key=123", json=data, headers={"Authorization": f"Basic {credentials}"}) assert 
response.status_code == expected_status assert 'user_profile' in response.json assert len(response.json['user_profile']) == expected_m_length assert response.json['user_profile']['id'] == expected_m_id assert response.json['user_profile']['user_id'] == expected_m_user_id assert response.json['user_profile']['first_name'] == \ expected_m_first_name assert response.json['user_profile']['last_name'] == expected_m_last_name assert response.json['user_profile']['joined_at'] == expected_m_joined_at assert response.json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( response.json['user_profile']['status_changed_at'])) assert response.json['user_profile']['created_at'] == \ expected_m_created_at assert response.json['user_profile']['updated_at'] == \ expected_m_updated_at @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_route_no_app_key(app, client): expected_status = 401 response = client.post("/user_profiles") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_route_bad_app_key(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.side_effect = NoResultFound() response = client.post("/user_profiles?app_key=BAD_KEY") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_post_user_profiles_route_unauthorized(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login auth_mock = mocker.patch('modules.administrators.Authentication') auth_mock.verify_password.side_effect = Unauthorized() response = client.post("/user_profiles?app_key=123") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_ok(app, mocker): expected_status = 200 expected_m_length = 9 expected_m_id = 2 expected_m_user_id = 9 expected_m_first_name = "LynneA" expected_m_last_name = "HarfordA" expected_m_joined_at = "2018-12-09T08:00:00+0000" expected_m_status = UserProfile.STATUS_DISABLED expected_m_created_at = None expected_m_updated_at = None # @todo: timezone re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$") request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } user_profile_2 = UserProfile() user_profile_2.id = expected_m_id user_9 = User() user_9.id = 9 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query and exists() validation query_mock.return_value \ .get.side_effect = [user_profile_2, user_9] db_mock = mocker.patch('modules.user_profiles.routes_admin.db') db_mock.commit.return_value = None result = put_user_profile(expected_m_id) assert result[1] == expected_status assert 'user_profile' in result[0].json assert len(result[0].json['user_profile']) == expected_m_length assert result[0].json['user_profile']['id'] == expected_m_id assert result[0].json['user_profile']['user_id'] == expected_m_user_id assert 
result[0].json['user_profile']['first_name'] == \ expected_m_first_name assert result[0].json['user_profile']['last_name'] == expected_m_last_name assert result[0].json['user_profile']['joined_at'] == expected_m_joined_at assert result[0].json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( result[0].json['user_profile']['status_changed_at'])) assert result[0].json['user_profile']['created_at'] == \ expected_m_created_at assert result[0].json['user_profile']['updated_at'] == \ expected_m_updated_at @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_required_fail(app, mocker): expected_status = 400 expected_json = { 'error': { 'user_id': ['Missing data for required field.'], 'first_name': ['Missing data for required field.'], 'foo': ['Unknown field.'], 'joined_at': ['Missing data for required field.'], 'last_name': ['Missing data for required field.'], 'status': ['Missing data for required field.'], } } request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = {'foo': "bar"} user_profile_2 = UserProfile() user_profile_2.id = 2 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query and exists() validation query_mock.return_value \ .get.return_value = [user_profile_2, None] result = put_user_profile(2) assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_user_exists_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'user_id': ['Invalid value.']}} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 250, 'first_name': "LynneA", 'last_name': "HarfordA", 'joined_at': "2018-12-09T08:00:00+0000", 'status': UserProfile.STATUS_DISABLED } user_profile_2 = UserProfile() user_profile_2.id = 2 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query and exists() validation query_mock.return_value \ .get.side_effect = [user_profile_2, None] result = put_user_profile(2) assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_min_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'first_name': ["Value must be between 1 and 40 characters long."], 'last_name': ["Value must be between 2 and 40 characters long."], }} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 9, 'first_name': "", 'last_name': "H", 'joined_at': "2018-12-09T08:00:00+0000", 'status': UserProfile.STATUS_DISABLED } user_profile_2 = UserProfile() user_profile_2.id = 2 user_9 = User() user_9.id = 9 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query and exists() validation query_mock.return_value \ .get.side_effect = [user_profile_2, user_9] result = put_user_profile(2) assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_max_fail(app, mocker): expected_status = 400 expected_json = {'error': { 'first_name': ["Value must be between 1 and 40 characters long."], 'last_name': ["Value must be between 2 and 40 characters long."], }} request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 9, 'first_name': "pSJxpg6GC2qRnekNVDKMkYqNqAbd7X5UzsKuhVzf4", 'last_name': 
"J5ATwnHEfD5YqSQNTDcb9bFbaD6ZRZvL3b9ugjyUK", 'joined_at': "2018-12-09T08:00:00+0000", 'status': UserProfile.STATUS_DISABLED } user_profile_2 = UserProfile() user_profile_2.id = 2 user_9 = User() user_9.id = 9 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query and exists() validation query_mock.return_value \ .get.side_effect = [user_profile_2, user_9] result = put_user_profile(2) assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_type_fail(app, mocker): expected_status = 400 expected_json = { "error": { 'user_id': ['Not a valid integer.'], 'first_name': ['Not a valid string.'], 'joined_at': ['Not a valid datetime.'], 'last_name': ['Not a valid string.'], 'status': ['Not a valid integer.'], } } request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': 'bad', 'first_name': 123, 'joined_at': 123, 'last_name': 123, 'status': "bad", } user_profile_2 = UserProfile() user_profile_2.id = 2 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock initial resource query query_mock.return_value \ .get.side_effect = [user_profile_2, None] result = put_user_profile(2) assert result[1] == expected_status assert result[0].json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_route_ok(app, mocker, client): expected_status = 200 expected_m_length = 9 expected_m_id = 2 expected_m_user_id = 9 expected_m_first_name = "LynneA" expected_m_last_name = "HarfordA" expected_m_joined_at = "2018-12-09T08:00:00+0000" expected_m_status = UserProfile.STATUS_DISABLED expected_m_created_at = None expected_m_updated_at = None # @todo: timezone re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$") data = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } user_profile_2 = UserProfile() user_profile_2.id = expected_m_id user_9 = User() user_9.id = 9 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None # mock initial resource query and exists() validation query_mock.return_value \ .get.side_effect = [user_profile_2, user_9] db_mock = mocker.patch('modules.user_profiles.routes_admin.db') db_mock.add.return_value = None db_mock.commit.return_value = None # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.put( "/user_profile/{}?app_key=123".format(expected_m_id), json=data, headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert 'user_profile' in response.json assert len(response.json['user_profile']) == expected_m_length assert response.json['user_profile']['id'] == 
expected_m_id assert response.json['user_profile']['user_id'] == expected_m_user_id assert response.json['user_profile']['first_name'] == \ expected_m_first_name assert response.json['user_profile']['last_name'] == expected_m_last_name assert response.json['user_profile']['joined_at'] == expected_m_joined_at assert response.json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( response.json['user_profile']['status_changed_at'])) assert response.json['user_profile']['created_at'] == \ expected_m_created_at assert response.json['user_profile']['updated_at'] == \ expected_m_updated_at @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_route_no_app_key(app, client): expected_status = 401 response = client.put("/user_profile/1") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_route_bad_app_key(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.side_effect = NoResultFound() response = client.put("/user_profile/1?app_key=BAD_KEY") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_put_user_profile_route_unauthorized(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login auth_mock = mocker.patch('modules.administrators.Authentication') auth_mock.verify_password.side_effect = Unauthorized() response = client.put("/user_profile/1?app_key=123") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_profile_ok(app, mocker): expected_status = 204 expected_content = '' query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .get.return_value = UserProfile() db_mock = mocker.patch('modules.user_profiles.routes_admin.db') db_mock.commit.return_value = None result = delete_user_profile(7) assert result[1] == expected_status assert result[0] == expected_content @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_fail(app, mocker): query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') query_mock.return_value \ .get.return_value = None try: delete_user_profile(250) assert False except NotFound: assert True @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_profile_route_ok(app, mocker, client): expected_status = 204 expected_json = None # mock db query query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login db query role2 = Role() role2.id = 2 role2.name = 'SUPER_ADMIN' role2.password_reset_days = 365 admin1 = Administrator() admin1.id = 1 admin1.password = 'admin1pass' admin1.roles = [role2] query_mock.return_value \ .filter.return_value \ .first.return_value = admin1 auth_db_mock = mocker.patch('modules.administrators.authentication.db') auth_db_mock.add.return_value = None auth_db_mock.commit.return_value = None # mock resource query query_mock.return_value \ .get.return_value = UserProfile() # mock db commit db_mock = 
mocker.patch('modules.user_profiles.routes_admin.db') db_mock.commit.return_value = None # mock user login auth_mock = mocker.patch( 'modules.administrators.Authentication.is_account_locked') auth_mock.return_value = False credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.delete("/user_profile/7?app_key=123", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert response.json == expected_json @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_profile_route_no_app_key(app, client): expected_status = 401 response = client.delete("/user_profile/7") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_profile_route_bad_app_key(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.side_effect = NoResultFound() response = client.delete("/user_profile/7?app_key=BAD_KEY") assert response.status_code == expected_status assert 'error' in response.json @pytest.mark.unit @pytest.mark.admin_api def test_delete_user_profile_route_unauthorized(app, mocker, client): expected_status = 401 query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__') # mock app key authorization db query query_mock.return_value \ .filter.return_value \ .one.return_value = AppKey() # mock user login auth_mock = mocker.patch('modules.administrators.Authentication') auth_mock.verify_password.side_effect = Unauthorized() response = client.delete("/user_profile/7?app_key=123") assert response.status_code == expected_status assert 'error' in response.json # INTEGRATION TESTS @pytest.mark.integration @pytest.mark.admin_api def test_get_user_profiles_route_with_data(client): expected_status = 200 expected_json = { "limit": 10, "page": 1, "total": 6, "user_profiles": [ { "created_at": "2018-12-01T00:00:00+0000", "first_name": "Fiona", "id": 1, "joined_at": "2018-12-03T00:00:00+0000", "last_name": "Farnham", "status": 1, "status_changed_at": "2018-12-04T00:00:00+0000", "updated_at": "2018-12-02T00:00:00+0000", "user_id": 1 }, { "created_at": "2018-12-05T00:00:00+0000", "first_name": "Lynne", "id": 2, "joined_at": "2018-12-07T00:00:00+0000", "last_name": "Harford", "status": 1, "status_changed_at": "2018-12-08T00:00:00+0000", "updated_at": "2018-12-06T00:00:00+0000", "user_id": 2 }, { "created_at": "2018-12-10T00:00:00+0000", "first_name": "Duane", "id": 3, "joined_at": "2018-12-12T00:00:00+0000", "last_name": "Hargrave", "status": 1, "status_changed_at": "2018-12-13T00:00:00+0000", "updated_at": "2018-12-11T00:00:00+0000", "user_id": 3 }, { "created_at": "2018-12-20T00:00:00+0000", "first_name": "Elroy", "id": 5, "joined_at": "2018-12-22T00:00:00+0000", "last_name": "Hunnicutt", "status": 2, "status_changed_at": "2018-12-23T00:00:00+0000", "updated_at": "2018-12-21T00:00:00+0000", "user_id": 5 }, { "created_at": "2018-12-25T00:00:00+0000", "first_name": "Alease", "id": 6, "joined_at": "2018-12-27T00:00:00+0000", "last_name": "Richards", "status": 5, "status_changed_at": "2018-12-28T00:00:00+0000", "updated_at": "2018-12-26T00:00:00+0000", "user_id": 6 }, { "created_at": "2019-01-05T00:00:00+0000", "first_name": "Luke", "id": 8, "joined_at": "2019-01-07T00:00:00+0000", "last_name": "Tennyson", "status": 1, "status_changed_at": "2019-01-08T00:00:00+0000", "updated_at": 
"2019-01-06T00:00:00+0000", "user_id": 8 } ] } credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get( "/user_profiles?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert response.json == expected_json @pytest.mark.integration @pytest.mark.admin_api def test_get_user_profile_2_route_with_data(client): expected_status = 200 expected_json = { "user_profile": { "created_at": "2018-12-05T00:00:00+0000", "first_name": "Lynne", "id": 2, "joined_at": "2018-12-07T00:00:00+0000", "last_name": "Harford", "status": 1, "status_changed_at": "2018-12-08T00:00:00+0000", "updated_at": "2018-12-06T00:00:00+0000", "user_id": 2 } } credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.get( "/user_profile/2?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert response.json == expected_json @pytest.mark.integration @pytest.mark.admin_api def test_post_user_profiles_route_with_data(client, mocker): expected_status = 201 expected_m_length = 9 expected_m_id = 9 expected_m_user_id = 9 expected_m_first_name = "Service" expected_m_last_name = "Account" expected_m_joined_at = "2019-02-04T00:00:00+0000" expected_m_status = UserProfile.STATUS_ENABLED re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$") request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.post( "/user_profiles?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert 'user_profile' in response.json assert len(response.json['user_profile']) == expected_m_length assert response.json['user_profile']['id'] == expected_m_id assert response.json['user_profile']['user_id'] == expected_m_user_id assert response.json['user_profile']['first_name'] == \ expected_m_first_name assert response.json['user_profile']['last_name'] == expected_m_last_name assert response.json['user_profile']['joined_at'] == expected_m_joined_at assert response.json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( response.json['user_profile']['status_changed_at'])) assert bool(re_datetime.match( response.json['user_profile']['created_at'])) assert bool(re_datetime.match( response.json['user_profile']['updated_at'])) @pytest.mark.integration @pytest.mark.admin_api def test_put_user_profile_route_with_data(client, mocker): expected_status = 200 expected_m_length = 9 expected_m_id = 2 expected_m_user_id = 9 expected_m_first_name = "LynneA" expected_m_last_name = "HarfordA" expected_m_joined_at = "2018-12-09T08:00:00+0000" expected_m_status = UserProfile.STATUS_DISABLED re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$") request_mock = mocker.patch('modules.user_profiles.routes_admin.request') request_mock.json = { 'user_id': expected_m_user_id, 'first_name': expected_m_first_name, 'last_name': expected_m_last_name, 'joined_at': expected_m_joined_at, 'status': expected_m_status, } credentials = base64.b64encode( 
'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.put( "/user_profile/{}?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW".format( expected_m_id), headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert 'user_profile' in response.json assert len(response.json['user_profile']) == expected_m_length assert response.json['user_profile']['id'] == expected_m_id assert response.json['user_profile']['user_id'] == expected_m_user_id assert response.json['user_profile']['first_name'] == \ expected_m_first_name assert response.json['user_profile']['last_name'] == expected_m_last_name assert response.json['user_profile']['joined_at'] == expected_m_joined_at assert response.json['user_profile']['status'] == expected_m_status assert bool(re_datetime.match( response.json['user_profile']['status_changed_at'])) assert bool(re_datetime.match( response.json['user_profile']['created_at'])) assert bool(re_datetime.match( response.json['user_profile']['updated_at'])) @pytest.mark.integration @pytest.mark.admin_api def test_delete_user_profile_7_route_with_data(client): expected_status = 204 expected_json = None credentials = base64.b64encode( 'admin1:admin1pass'.encode('ascii')).decode('utf-8') response = client.delete( "/user_profile/7?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW", headers={"Authorization": f"Basic {credentials}"}) assert response.status_code == expected_status assert response.json == expected_json
31.167479
79
0.671454
6,497
51,177
4.964291
0.039095
0.064459
0.034415
0.034105
0.952377
0.938765
0.932719
0.919976
0.911977
0.898893
0
0.036532
0.213201
51,177
1,641
80
31.186472
0.764466
0.032124
0
0.817666
0
0.004862
0.216729
0.103978
0
0
0
0.000609
0.138574
1
0.038088
false
0.025122
0.012156
0
0.050243
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
514003ef09e5ac7eed9001343b34c0a0908bad47
53,282
py
Python
iiif_api_services/tests/test_collection.py
utlib/utl_iiif_api
e2d77ad5a677487e205b840cc800af46b4697eab
[ "Apache-2.0" ]
4
2019-12-08T18:39:37.000Z
2021-11-09T19:35:44.000Z
iiif_api_services/tests/test_collection.py
utlib/utl_iiif_api
e2d77ad5a677487e205b840cc800af46b4697eab
[ "Apache-2.0" ]
2
2020-06-05T16:45:31.000Z
2021-06-10T17:30:00.000Z
iiif_api_services/tests/test_collection.py
utlib/utl_iiif_api
e2d77ad5a677487e205b840cc800af46b4697eab
[ "Apache-2.0" ]
1
2019-12-08T18:45:52.000Z
2019-12-08T18:45:52.000Z
import os
import json
from test_addons import APIMongoTestCase
from rest_framework import status
from rest_framework_jwt.settings import api_settings
from django.conf import settings  # import the settings file to get IIIF_BASE_URL & IIIF_CONTEXT
from iiif_api_services.models.User import User
from iiif_api_services.models.CollectionModel import Collection
from iiif_api_services.models.ManifestModel import Manifest
from django.test import override_settings

COLLECTION_MEDIUM = os.path.join(os.path.dirname(__file__), 'testData', 'collection', 'collectionMedium.json')
COLLECTION_SHORT = os.path.join(os.path.dirname(__file__), 'testData', 'collection', 'collectionShort.json')
URL = '/collections'


class Collection_Test_Without_Authentication(APIMongoTestCase):
    def setUp(self):
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collections/book1").save()
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        Collection(label="collection3", name="book3", ATid="http://example.org/iiif/collections/book3").save()

    def test_to_get_a_specific_collection_by_name(self):
        response = self.client.get('/collections/book1')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["ATtype"], 'sc:Collection')
        self.assertEqual(response.data["label"], 'collection1')

    def test_a_collection_cannot_be_created(self):
        response = self.client.post(URL, {})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_a_collection_cannot_be_updated(self):
        response = self.client.put('/collections/book2', {})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_a_collection_cannot_be_deleted(self):
        response = self.client.delete('/collections/book3', {})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)


@override_settings()
class Collection_Test_POST_Without_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('staff', 'staff@mail.com', 'staffpass')
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        settings.QUEUE_POST_ENABLED = False
        settings.QUEUE_PUT_ENABLED = False
        settings.QUEUE_DELETE_ENABLED = False

    def test_a_collection_can_be_successfully_created_with_no_nested_structures(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label parent')
        self.assertEqual(Collection.objects()[0].ATid, settings.IIIF_BASE_URL + "/collections/book1")

    def test_a_collection_can_be_successfully_created_with_one_level_nested_structures(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label child')
        self.assertEqual(Collection.objects()[0].ATid, settings.IIIF_BASE_URL + "/collections/top6")
        self.assertEqual(len(Collection.objects()), 4)
        self.assertEqual(len(Manifest.objects()), 3)
        createdCollectionID = settings.IIIF_BASE_URL + "/collections/book1"
        self.assertEqual(createdCollectionID in Collection.objects.get(name='top6').belongsTo, True)
        self.assertEqual(createdCollectionID in Manifest.objects.get(identifier='book1').belongsTo, True)

    def test_a_duplicate_collection_cannot_be_created(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]["name"], ['This field must be unique.'])

    def test_a_collection_with_no_id_given_can_be_successfully_created(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        del data["collection"]["@id"]
        response = self.client.post("/collections", data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label parent')

    def test_a_collection_cannot_be_created_with_id_being_top_level_collection_name(self):
        data = {"collection": {"@id": settings.IIIF_BASE_URL + "/collections/" + settings.TOP_LEVEL_COLLECTION_NAME}}
        response = self.client.post("/collections", data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_412_PRECONDITION_FAILED)
        self.assertEqual(response.data["responseBody"]["error"], "Collection name cannot be: " + settings.TOP_LEVEL_COLLECTION_NAME + ".")

    def test_a_hidden_child_cannot_be_viewed(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        response = self.client.get("/collections/book1")
        self.assertEqual(len(response.data["manifests"]), 3)
        manifest = Manifest.objects.get(identifier='book1')
        manifest.hidden = True
        manifest.save()
        response = self.client.get("/collections/book1")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["collections"]), 3)
        self.assertEqual(len(response.data["manifests"]), 2)

    def test_an_existing_child_collection_will_be_updated_on_parent_creation(self):
        Collection(label="top6", name="top6", ATid="http://example.org/iiif/collections/top6", ownedBy=["staff"]).save()
        self.assertEqual(Collection.objects.get(name="top6").label, "top6")
        self.assertFalse(settings.IIIF_BASE_URL + "/collections/book1" in Collection.objects.get(name="top6").belongsTo)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects.get(name="top6").label, "some collection label child")
        self.assertTrue(settings.IIIF_BASE_URL + "/collections/book1" in Collection.objects.get(name="top6").belongsTo)

    def test_a_collection_cannot_be_created_with_errors_in_nested_collections(self):
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["collections"][1]["total"] = "invalid"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]['total'][0], 'A valid integer is required.')
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)

    def test_a_collection_cannot_be_created_with_errors_in_nested_manifests(self):
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["manifests"][1]["viewingDirection"] = ["invalid"]
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]['viewingDirection'][0], 'Not a valid string.')
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)

    def test_a_collection_cannot_be_created_with_errors_in_nested_members(self):
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["members"][1]["total"] = "invalid"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]['total'][0], 'A valid integer is required.')
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)

    def test_a_collection_cannot_be_created_with_errors_in_nested_members_with_missing_type(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        del data["collection"]["members"][1]["@type"]
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"], 'Field @type is required for member object.')

    def test_a_collection_cannot_be_created_with_errors_in_nested_members_with_invalid_type(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        data["collection"]["members"][1]["@type"] = "invalid"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"], 'Field @type must be sc:Collection or sc:Manifest.')

    def test_a_collection_created_with_errors_in_nested_members_will_clean_previous_members(self):
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        data["collection"]["members"][0]["viewingDirection"] = ["invalid"]
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]["viewingDirection"][0], 'Not a valid string.')
        self.assertEqual(len(Collection.objects), 0)
        self.assertEqual(len(Manifest.objects), 0)

    def test_a_collection_will_create_its_user_permissions_field(self):
        self.user = User.create_user('testStaff', 'testemail@mail.com', 'testStaffpass', False)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects.get(name="book1").ownedBy, ["testStaff"])
        self.assertEqual(Collection.objects.get(name="top6").ownedBy, ["testStaff"])
        self.assertEqual(Collection.objects.get(name="top98").ownedBy, ["testStaff"])
        self.assertEqual(Collection.objects.get(name="top6666").ownedBy, ["testStaff"])
        self.assertEqual(Manifest.objects.get(identifier="book1").ownedBy, ["testStaff"])
        self.assertEqual(Manifest.objects.get(identifier="book2").ownedBy, ["testStaff"])
        self.assertEqual(Manifest.objects.get(identifier="book3").ownedBy, ["testStaff"])


@override_settings()
class Collection_Test_POST_With_THREAD_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('staff', 'staff@mail.com', 'staffpass')
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'THREAD'

    def test_a_collection_can_be_successfully_created_with_no_nested_structures(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label parent')
        self.assertEqual(Collection.objects()[0].ATid, settings.IIIF_BASE_URL + "/collections/book1")


@override_settings()
class Collection_Test_POST_With_PROCESS_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('staff', 'staff@mail.com', 'staffpass')
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'PROCESS'

    def test_a_collection_can_be_successfully_created_with_no_nested_structures(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label parent')
        self.assertEqual(Collection.objects()[0].ATid, settings.IIIF_BASE_URL + "/collections/book1")


@override_settings()
class Collection_Test_POST_With_CELERY_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('staff', 'staff@mail.com', 'staffpass')
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'CELERY'

    def test_a_collection_can_be_successfully_created_with_no_nested_structures(self):
        data = {"collection": json.loads(open(COLLECTION_SHORT).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertEqual(Collection.objects()[0].label, 'some collection label parent')
        self.assertEqual(Collection.objects()[0].ATid, settings.IIIF_BASE_URL + "/collections/book1")


class Collection_Test_GET(APIMongoTestCase):
    def setUp(self):
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collections/book1").save()

    def test_a_collection_from_a_name_that_does_not_exist_cannot_be_viewed(self):
        response = self.client.get("/collections/nonExistingCollection")
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data["error"], "Collection with name 'nonExistingCollection' does not exist.")

    def test_default_top_level_collection_can_be_viewed(self):
        response = self.client.get("/collections/" + settings.TOP_LEVEL_COLLECTION_NAME)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["ATid"], settings.IIIF_BASE_URL + "/collections/" + settings.TOP_LEVEL_COLLECTION_NAME)

    def test_default_uoft_collection_can_be_viewed_from_root_endpoint(self):
        response = self.client.get("/collections")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["ATid"], settings.IIIF_BASE_URL + "/collections/" + settings.TOP_LEVEL_COLLECTION_NAME)

    def test_a_collection_can_be_viewed(self):
        response = self.client.get("/collections/book1")
        self.assertEqual(response.status_code, status.HTTP_200_OK)


@override_settings()
class Collection_Test_DELETE_Without_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = False
        settings.QUEUE_PUT_ENABLED = False
        settings.QUEUE_DELETE_ENABLED = False

    def test_a_collection_can_be_deleted_successfully(self):
        response = self.client.delete("/collections/book2")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.data["responseBody"]['message'], "Successfully deleted Collection 'book2'.")

    def test_a_collection_that_does_not_exist_cannot_be_deleted(self):
        response = self.client.delete("/collections/nonExistingItem")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data["responseBody"]["error"], "Collection with name 'nonExistingItem' does not exist.")

    def test_deleting_a_collection_will_delete_all_of_its_nested_objects(self):
        self.assertEqual(len(Manifest.objects), 0)
        self.assertEqual(len(Collection.objects), 1)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        response = self.client.delete("/collections/book1")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.data["responseBody"]['message'], "Successfully deleted Collection 'book1'.")
        self.assertEqual(len(Manifest.objects), 0)
        self.assertEqual(len(Collection.objects), 1)


@override_settings()
class Collection_Test_DELETE_With_THREAD_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'THREAD'

    def test_a_collection_can_be_deleted_successfully(self):
        response = self.client.delete("/collections/book2")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.data["responseBody"]['message'], "Successfully deleted Collection 'book2'.")


@override_settings()
class Collection_Test_DELETE_With_PROCESS_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'PROCESS'

    def test_a_collection_can_be_deleted_successfully(self):
        response = self.client.delete("/collections/book2")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.data["responseBody"]['message'], "Successfully deleted Collection 'book2'.")


@override_settings()
class Collection_Test_DELETE_With_CELERY_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'CELERY'

    def test_a_collection_can_be_deleted_successfully(self):
        response = self.client.delete("/collections/book2")
        if settings.QUEUE_DELETE_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.data["responseBody"]['message'], "Successfully deleted Collection 'book2'.")


@override_settings()
class Collection_Test_PUT_Without_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collectionsShort/book1", viewingHint="paged").save()
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = False
        settings.QUEUE_PUT_ENABLED = False
        settings.QUEUE_DELETE_ENABLED = False

    def test_a_collection_can_be_updated_successfully(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects()[0].label, 'new_collection1')
        self.assertEqual(Collection.objects()[0].viewingHint, 'non-paged')

    def test_a_collection_cannot_be_updated_with_invalid_data(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": ["non-paged"]}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]["viewingHint"][0], "Not a valid string.")

    def test_a_collection_name_that_does_not_exist_cannot_be_updated(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/nonExistingCollection", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data["responseBody"]["error"], "Collection with name 'nonExistingCollection' does not exist.")

    def test_a_collection_with_new_id_can_be_updated_successfully(self):
        data = {"collection": {"@id": "http://example.org/iiif/collections/new_book1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(response.data["responseBody"]["@id"], settings.IIIF_BASE_URL + "/collections/new_book1")

    def test_the_top_level_collection_cannot_be_updated(self):
        data = {"collection": {"@id": "http://example.org/iiif/collections/new_book1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/" + settings.TOP_LEVEL_COLLECTION_NAME, data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        print(response.data["responseBody"])
        self.assertEqual(response.data["responseCode"], status.HTTP_412_PRECONDITION_FAILED)
        self.assertEqual(response.data["responseBody"]["error"], "Top level Collection cannot be edited.")

    def test_a_collection_with_nested_objects_can_be_updated_successfully(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        self.assertEqual(Collection.objects()[2].label, 'some collection label child')
        data["collection"]["collections"][0]["label"] = 'NEW LABEL'
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects()[2].label, "NEW LABEL")

    def test_a_collection_with_nested_objects_no_id_can_be_updated_successfully(self):
        self.assertEqual(len(Manifest.objects), 0)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        self.assertEqual(len(Manifest.objects), 3)
        del data["collection"]["members"][0]["@id"]
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(len(Manifest.objects), 4)

    def test_a_collection_with_new_id_will_update_its_nested_objects_belongsTo_field(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        self.assertEqual(Collection.objects.get(name='top6').belongsTo[0], settings.IIIF_BASE_URL + "/collections/book3")
        self.assertEqual(Collection.objects.get(name='top98').belongsTo[0], settings.IIIF_BASE_URL + "/collections/book3")
        self.assertEqual(Collection.objects.get(name='top6666').belongsTo[0], settings.IIIF_BASE_URL + "/collections/book3")
        data["collection"]["@id"] = "http://example.org/iiif/collections/not-book3"
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects.get(name='top6').belongsTo[0], settings.IIIF_BASE_URL + "/collections/not-book3")
        self.assertEqual(Collection.objects.get(name='top98').belongsTo[0], settings.IIIF_BASE_URL + "/collections/not-book3")
        self.assertEqual(Collection.objects.get(name='top6666').belongsTo[0], settings.IIIF_BASE_URL + "/collections/not-book3")

    def test_a_collection_with_new_id_will_update_its_parent_objects_children_field(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book35"
        data["collection"]['members'][1]['collections'] = [{"@id": "http://example.org/iiif/collection/book_3_child"}]
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        self.assertEqual(Collection.objects.get(name='book_3_child').belongsTo[0], settings.IIIF_BASE_URL + "/collections/top6666")
        self.assertEqual(Collection.objects.get(name='top6666').children[0], settings.IIIF_BASE_URL + "/collections/book_3_child")
        data = {"collection": {"@id": "http://example.org/iiif/collections/not_book3_child"}}
        response = self.client.put(URL + "/book_3_child", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects.get(name='not_book3_child').belongsTo[0], settings.IIIF_BASE_URL + "/collections/top6666")
        self.assertEqual(Collection.objects.get(name='top6666').children[0], settings.IIIF_BASE_URL + "/collections/not_book3_child")

    def test_a_collection_cannot_be_updated_with_errors_in_nested_collections(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        data["collection"]["collections"][1]["total"] = "invalid"
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]['total'][0], 'A valid integer is required.')

    def test_a_collection_cannot_be_updated_with_invalid_viewingHint_in_nested_collections(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        data["collection"]["collections"][1]["viewingHint"] = ["invalid"]
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]["viewingHint"][0], "Not a valid string.")

    def test_a_collection_can_be_updated_with_nested_members_with_missing_id(self):
        self.assertEqual(len(Collection.objects), 2)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        self.assertEqual(len(Collection.objects), 6)
        del data["collection"]["members"][1]["@id"]
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(len(Collection.objects), 7)

    def test_a_collection_cannot_be_updated_with_errors_in_nested_members_with_missing_type(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        del data["collection"]["members"][1]["@type"]
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"], 'Field @type is required for member object.')

    def test_a_collection_cannot_be_updated_with_errors_in_nested_members_with_invalid_type(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        data["collection"]["members"][1]["@type"] = "invalid"
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"], 'Field @type must be sc:Collection or sc:Manifest.')

    def test_a_collection_cannot_be_updated_with_errors_in_nested_manifests(self):
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
        data["collection"]["manifests"][1]["viewingDirection"] = ["invalid"]
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_422_UNPROCESSABLE_ENTITY)
        self.assertEqual(response.data["responseBody"]["error"]['viewingDirection'][0], 'Not a valid string.')

    def test_a_collection_can_be_updated_with_a_new_belongsTo_field_will_replace_existing_values(self):
        Collection(label="collection2", name="book3", ATid="http://example.org/iiif/collections/book3", belongsTo=[settings.IIIF_BASE_URL + "/collections/book1"]).save()
        self.assertEqual(Collection.objects.get(name='book3').belongsTo, [settings.IIIF_BASE_URL + "/collections/book1"])
        self.assertFalse("http://example.org/iiif/collections/book2" in Collection.objects.get(name='book3').belongsTo)
        data = {"collection": {"belongsTo": ["http://example.org/iiif/collections/book2"]}}
        response = self.client.put(URL + "/book3", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertFalse(settings.IIIF_BASE_URL + "/collections/book1" in Collection.objects.get(name='book3').belongsTo)
        self.assertTrue("http://example.org/iiif/collections/book2" in Collection.objects.get(name='book3').belongsTo)

    def test_an_embedded_collection_updated_with_a_new_belongsTo_field_will_append_existing_values(self):
        Collection(label="top6", name="top6", ATid="http://example.org/iiif/collections/top6", belongsTo=[settings.IIIF_BASE_URL + "/collections/book156"]).save()
        self.assertEqual(Collection.objects.get(name='top6').belongsTo, [settings.IIIF_BASE_URL + "/collections/book156"])
        self.assertFalse(settings.IIIF_BASE_URL + "/collections/book3" in Collection.objects.get(name='top6').belongsTo)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertTrue(settings.IIIF_BASE_URL + "/collections/book3" in Collection.objects.get(name='top6').belongsTo)
        self.assertTrue(settings.IIIF_BASE_URL + "/collections/book156" in Collection.objects.get(name='top6').belongsTo)

    def test_an_embedded_manifest_updated_with_a_new_belongsTo_field_will_append_existing_values(self):
        Manifest(label="top6", identifier="book1", ATid="http://example.org/iiif/book1/manifest", belongsTo=[settings.IIIF_BASE_URL + "/collections/book156"]).save()
        self.assertEqual(Manifest.objects.get(identifier='book1').belongsTo, [settings.IIIF_BASE_URL + "/collections/book156"])
        self.assertFalse(settings.IIIF_BASE_URL + "/collections/book3" in Manifest.objects.get(identifier='book1').belongsTo)
        data = {"collection": json.loads(open(COLLECTION_MEDIUM).read())}
        data["collection"]["@id"] = "http://example.org/iiif/collections/book3"
        response = self.client.post(URL, data)
        if settings.QUEUE_POST_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_201_CREATED)
        self.assertTrue(settings.IIIF_BASE_URL + "/collections/book3" in Manifest.objects.get(identifier='book1').belongsTo)
        self.assertTrue(settings.IIIF_BASE_URL + "/collections/book156" in Manifest.objects.get(identifier='book1').belongsTo)


@override_settings()
class Collection_Test_PUT_With_THREAD_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collectionsShort/book1", viewingHint="paged").save()
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'THREAD'

    def test_a_collection_can_be_updated_successfully(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects()[0].label, 'new_collection1')
        self.assertEqual(Collection.objects()[0].viewingHint, 'non-paged')


@override_settings()
class Collection_Test_PUT_With_PROCESS_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collectionsShort/book1", viewingHint="paged").save()
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'PROCESS'

    def test_a_collection_can_be_updated_successfully(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects()[0].label, 'new_collection1')
        self.assertEqual(Collection.objects()[0].viewingHint, 'non-paged')


@override_settings()
class Collection_Test_PUT_With_CELERY_QUEUE(APIMongoTestCase):
    def setUp(self):
        self.user = User.create_user('testadmin', 'testemail@mail.com', 'testadminpass', True)
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(self.user)
        token = jwt_encode_handler(payload)
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
        Collection(label="collection1", name="book1", ATid="http://example.org/iiif/collectionsShort/book1", viewingHint="paged").save()
        Collection(label="collection2", name="book2", ATid="http://example.org/iiif/collections/book2").save()
        settings.QUEUE_POST_ENABLED = True
        settings.QUEUE_PUT_ENABLED = True
        settings.QUEUE_DELETE_ENABLED = True
        settings.QUEUE_RUNNER = 'CELERY'

    def test_a_collection_can_be_updated_successfully(self):
        data = {"collection": {"label": "new_collection1", "viewingHint": "non-paged"}}
        response = self.client.put("/collections/book1", data)
        if settings.QUEUE_PUT_ENABLED:
            while self.client.get(response.data["status"]).status_code != status.HTTP_301_MOVED_PERMANENTLY:
                pass  # Wait till background process finishes
            response = self.client.get(response.data["status"])
        self.assertEqual(response.data["responseCode"], status.HTTP_200_OK)
        self.assertEqual(Collection.objects()[0].label, 'new_collection1')
        self.assertEqual(Collection.objects()[0].viewingHint, 'non-paged')
66.436409
168
0.720056
6,417
53,282
5.747546
0.035998
0.048262
0.053197
0.056369
0.951467
0.935497
0.918578
0.898921
0.878938
0.86183
0
0.014242
0.153973
53,282
802
169
66.436409
0.80394
0.041065
0
0.771389
0
0
0.16633
0.005956
0
0
0
0
0.234222
0
null
null
0.096774
0.014025
null
null
0.001403
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
9
5a95fdfcc2eacb26f51fffde772f548fbf419cd4
2,814
py
Python
pure_cols_checker.py
Yasin-VU/simple-python-bibliometrics
2491043516e7f3ee42a2bc370efedf1a8ceffd6f
[ "BSD-3-Clause" ]
6
2020-01-29T13:33:08.000Z
2022-01-17T18:39:28.000Z
pure_cols_checker.py
descartesmbogning/simple-python-bibliometrics
2491043516e7f3ee42a2bc370efedf1a8ceffd6f
[ "BSD-3-Clause" ]
5
2020-01-29T13:39:10.000Z
2020-02-12T06:39:57.000Z
pure_cols_checker.py
descartesmbogning/simple-python-bibliometrics
2491043516e7f3ee42a2bc370efedf1a8ceffd6f
[ "BSD-3-Clause" ]
1
2021-04-20T19:28:20.000Z
2021-04-20T19:28:20.000Z
orig = ['Unnamed: 0', 'Title of the contribution in original language',
        'Current publication status > Date',
        'Subtitle of the contribution in original language', 'Type',
        'Workflow > Step', 'Original language',
        'Electronic version(s) of this work > DOI (Digital Object Identifier)[1]',
        'Organisations > Organisational unit[1]', 'Organisations > Organisational unit[2]',
        'Organisations > Organisational unit[3]', 'Organisations > Organisational unit[4]',
        'Organisations > Organisational unit[5]', 'Organisations > Organisational unit[6]',
        'Organisations > Organisational unit[7]', 'Organisations > Organisational unit[8]',
        'Organisations > Organisational unit[9]', 'Organisations > Organisational unit[10]',
        'Journal > Journal[1]:Titles', 'Journal > Journal[1]:ISSNs',
        'UUID', 'DOI', 'orig_doi',
        'upw_doi', 'upw_doi_lowercase', 'upw_error', 'upw_error_message',
        'upw_free_fulltext_url', 'upw_is_boai_license', 'upw_is_free_to_read',
        'upw_is_subscription_journal', 'upw_license', 'upw_oa_color_x',
        'upw_oa_color_verbose', 'ff', 'ff_provided_organization_string',
        'ff_match', 'ff_score', 'ff_terms', 'ff_message', 'ff_match_subgroup',
        'DOI_isnull', 'pub_uuid', 'pure_year']

new = ['Unnamed: 0', 'Title of the contribution in original language',
       'Current publication status > Date',
       'Subtitle of the contribution in original language', 'Type',
       'Workflow > Step', 'Original language',
       'Electronic version(s) of this work > DOI (Digital Object Identifier)[1]',
       'Organisations > Organisational unit[1]', 'Organisations > Organisational unit[2]',
       'Organisations > Organisational unit[3]', 'Organisations > Organisational unit[4]',
       'Organisations > Organisational unit[5]', 'Organisations > Organisational unit[6]',
       'Organisations > Organisational unit[7]', 'Organisations > Organisational unit[8]',
       'Organisations > Organisational unit[9]', 'Organisations > Organisational unit[10]',
       'Journal > Journal[1]:Titles', 'Journal > Journal[1]:ISSNs',
       'UUID', 'DOI', 'orig_doi', 'own_doi_lowercase',
       'upw_doi', 'upw_doi_lowercase', 'upw_error', 'upw_error_message',
       'upw_free_fulltext_url', 'upw_is_boai_license', 'upw_is_free_to_read',
       'upw_is_subscription_journal', 'upw_license', 'upw_oa_color_x',
       'upw_oa_color_verbose', 'ff', 'ff_provided_organization_string',
       'ff_match', 'ff_score', 'ff_terms', 'ff_message', 'ff_match_subgroup',
       'DOI_isnull', 'pub_uuid', 'pure_year']

print(orig == new)
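The bare boolean above only says *whether* the two column layouts match. A hedged follow-up sketch, using only the `orig` and `new` names defined above, that reports the actual difference (here: `new` inserts 'own_doi_lowercase' after 'orig_doi'):

# Report which columns were added or removed, not just whether the lists match.
added = [c for c in new if c not in orig]     # expected: ['own_doi_lowercase']
removed = [c for c in orig if c not in new]   # expected: []
print('added:', added)
print('removed:', removed)
# The shared columns keep their relative order if these two projections agree.
print('order preserved:', [c for c in orig if c in new] == [c for c in new if c in orig])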
29.621053
83
0.634684
302
2,814
5.649007
0.251656
0.31653
0.363423
0.044549
0.98007
0.98007
0.98007
0.98007
0.98007
0.98007
0
0.013908
0.233475
2,814
94
84
29.93617
0.777005
0
0
0.955556
0
0
0.707326
0.056188
0
0
0
0
0
1
0
false
0
0
0
0
0.011111
0
0
0
null
1
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
51a24cd01db8fe17d61aa716e6112a4d7041d48b
13,294
py
Python
RNN_Cell.py
srnn/sRNN
00618c719553bf54f693b51bef6bec8eb233e51a
[ "MIT" ]
null
null
null
RNN_Cell.py
srnn/sRNN
00618c719553bf54f693b51bef6bec8eb233e51a
[ "MIT" ]
null
null
null
RNN_Cell.py
srnn/sRNN
00618c719553bf54f693b51bef6bec8eb233e51a
[ "MIT" ]
null
null
null
import torch
import torch.nn as nn
import numpy as np
from expRNN.exprnn import modrelu
from expRNN.initialization import henaff_init, cayley_init, random_orthogonal_init
from expRNN.exp_numpy import expm, expm_frechet
import snorm

verbose = False


def rvs(dim=3):
    # Sample a random orthogonal matrix via successive Householder reflections.
    random_state = np.random
    H = np.eye(dim)
    D = np.ones((dim,))
    for n in range(1, dim):
        x = random_state.normal(size=(dim-n+1,))
        D[n-1] = np.sign(x[0])
        x[0] -= D[n-1]*np.sqrt((x*x).sum())
        # Householder transformation
        Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())
        mat = np.eye(dim)
        mat[n-1:, n-1:] = Hx
        H = np.dot(H, mat)
    # Fix the last sign such that the determinant is 1
    D[-1] = (-1)**(1-(dim % 2))*D.prod()
    # Equivalent to np.dot(np.diag(D), H) but faster, apparently
    H = (D*H.T).T
    return H


class RNNCell(nn.Module):
    def __init__(self, inp_size, hid_size, nonlin, bias=True, cuda=False,
                 r_initializer=henaff_init, i_initializer=nn.init.xavier_normal_):
        super(RNNCell, self).__init__()
        self.cudafy = cuda
        self.hidden_size = hid_size
        # Add non-linearity
        if nonlin == 'relu':
            self.nonlinearity = nn.ReLU()
        if nonlin == 'modrelu':
            self.nonlinearity = modrelu(hid_size)
        elif nonlin == 'tanh':
            self.nonlinearity = nn.Tanh()
        elif nonlin == 'sigmoid':
            self.nonlinearity = nn.Sigmoid()
        else:
            self.nonlinearity = None
        # Create linear layer to act on input X
        self.U = nn.Linear(inp_size, hid_size, bias=bias)
        self.i_initializer = i_initializer
        self.V = nn.Linear(hid_size, hid_size, bias=False)
        self.r_initializer = r_initializer
        self.reset_parameters()

    def reset_parameters(self):
        self.i_initializer(self.U.weight.data)
        if self.r_initializer == random_orthogonal_init or \
           self.r_initializer == henaff_init or \
           self.r_initializer == cayley_init:
            self.V.weight.data = self._B(
                torch.as_tensor(self.r_initializer(self.hidden_size)))
        else:
            print('other')
            self.r_initializer(self.V.weight.data)

    def _A(self, gradients=False):
        A = self.V.weight.data
        if not gradients:
            A = A.data
        A = A.triu(diagonal=1)
        return A - A.t()

    def _B(self, gradients=False):
        return expm(self._A())

    def _norm(self):
        norm = snorm.spectral_norm(self.V.weight.data)
        return norm

    def forward(self, x, hidden=None):
        if hidden is None:
            hidden = x.new_zeros(x.shape[0], self.hidden_size, requires_grad=True)
            self.first_hidden = hidden
        h = self.U(x) + self.V(hidden)
        if self.nonlinearity:
            h = self.nonlinearity(h)
        return h


class OrthoRNNCell(nn.Module):
    def __init__(self, inp_size, hid_size, nonlin, bias=False, cuda=False,
                 r_initializer=henaff_init, i_initializer=nn.init.xavier_normal_):
        super(OrthoRNNCell, self).__init__()
        self.cudafy = cuda
        self.hidden_size = hid_size
        # Add non-linearity
        if nonlin == 'relu':
            self.nonlinearity = nn.ReLU()
        if nonlin == 'modrelu':
            self.nonlinearity = modrelu(hid_size)
        elif nonlin == 'tanh':
            self.nonlinearity = nn.Tanh()
        elif nonlin == 'sigmoid':
            self.nonlinearity = nn.Sigmoid()
        else:
            self.nonlinearity = None
        # Create linear layer to act on input X
        self.U = nn.Linear(inp_size, hid_size, bias=bias)
        self.i_initializer = i_initializer
        self.r_initializer = r_initializer
        # determine if P is learnable, if P is learnable determine how
        self.log_P = torch.Tensor(hid_size, hid_size)
        self.log_P = nn.Parameter(self.log_P)
        self.P = torch.Tensor(hid_size, hid_size)
        self.P = nn.Parameter(self.P)
        self.gamma = nn.Parameter(torch.ones(1))
        UppT = torch.zeros(hid_size, hid_size)
        self.UppT = UppT
        self.UppT = nn.Parameter(self.UppT)
        self.M = torch.triu(torch.ones_like(self.UppT), diagonal=1)
        self.D = torch.zeros_like(self.UppT)
        # Create rotations and mask M for *.3 and *.4
        self.thetas = [0]*int(hid_size)
        for i in range(0, len(self.thetas)):
            self.thetas[i] = nn.Parameter(torch.Tensor([np.random.uniform(0, 2*3.14)]))
            self.register_parameter('theta_{}'.format(i), self.thetas[i])
        self.alpha_crit = nn.MSELoss()
        self.alphas = [0]*int(hid_size/2)
        for i in range(0, len(self.alphas)):
            self.alphas[i] = nn.Parameter(torch.Tensor([np.random.uniform(1.00, 1.00)]))
            self.register_parameter('alpha_{}'.format(i), self.alphas[i])
        self.reset_parameters()
        # cudafy variables if needed
        if cuda:
            self.P.data = self.P.data.cuda()
            self.log_P.data = self.log_P.data.cuda()
            self.M = self.M.cuda()
            self.D = self.D.cuda()
            for item in self.thetas:
                item = item.cuda()
            for item in self.alphas:
                item = item.cuda()

    def reset_parameters(self):
        if self.r_initializer == random_orthogonal_init or \
           self.r_initializer == henaff_init or \
           self.r_initializer == cayley_init:
            self.P.data = self._B(
                torch.as_tensor(self.r_initializer(self.hidden_size), dtype=torch.float32))
        else:
            self.r_initializer(self.P.data)

    def _A(self, gradients=False):
        A = self.log_P
        if not gradients:
            A = A.data
        A = A.triu(diagonal=1)
        return A - A.t()

    def _norm(self):
        norm = 1
        if hasattr(self, 'rec'):
            norm = snorm.spectral_norm(self.rec.data)
        return norm

    def _B(self, gradients=False):
        return expm(self._A())

    def orthogonal_step(self, optimizer):
        # Riemannian-style update: step in the Lie algebra, then re-exponentiate.
        A = self._A(False)
        B = self.P.data
        G = self.P.grad.data
        BtG = B.t().mm(G)
        grad = 0.5*(BtG - BtG.t())
        frechet_deriv = B.mm(expm_frechet(-A, grad))
        self.log_P.grad = (frechet_deriv - frechet_deriv.t()).triu(diagonal=1)
        optimizer.step()
        self.P.data = self._B(False)
        self.P.grad.data.zero_()

    def forward(self, x, hidden=None):
        if hidden is None:
            hidden = x.new_zeros(x.shape[0], self.hidden_size, requires_grad=True)
            self.first_hidden = hidden
        self.calc_rec()
        h = self.U(x) + torch.matmul(hidden, self.rec)
        if self.nonlinearity:
            h = self.nonlinearity(h)
        return h

    def calc_rec(self):
        self.calc_D()
        self.calc_alpha_block()
        self.rec = torch.matmul(
            torch.matmul(self.P, torch.mul(self.UppT, self.M) + torch.mul(self.alpha_block, self.D)),
            self.P.t())

    def calc_D(self):
        # D holds 2x2 rotation blocks parameterised by the thetas.
        self.D = torch.zeros_like(self.UppT)
        for i in range(0, self.hidden_size, 2):
            self.D[i, i] = self.thetas[int(i/2)].cos()
            self.D[i, i+1] = -self.thetas[int(i/2)].sin()
            self.D[i+1, i] = self.thetas[int(i/2)].sin()
            self.D[i+1, i+1] = self.thetas[int(i/2)].cos()

    def calc_alpha_block(self):
        self.alpha_block = torch.zeros_like(self.UppT)
        for i in range(0, self.hidden_size, 2):
            self.alpha_block[i, i] = self.alphas[int(i/2)]
            self.alpha_block[i+1, i] = self.alphas[int(i/2)]
            self.alpha_block[i, i+1] = self.alphas[int(i/2)]
            self.alpha_block[i+1, i+1] = self.alphas[int(i/2)]

    def alpha_loss(self, lam):
        reg_loss = 0
        for alph in range(len(self.alphas)):
            reg_loss += lam*self.alpha_crit(self.alphas[alph], torch.ones_like(self.alphas[alph]))
        return reg_loss


class OrthoRNNCell2(nn.Module):
    def __init__(self, inp_size, hid_size, slen, nonlin, bias=False, cuda=False,
                 r_initializer=henaff_init, i_initializer=nn.init.xavier_normal_):
        super(OrthoRNNCell2, self).__init__()
        self.cudafy = cuda
        self.hidden_size = hid_size
        # Add non-linearity
        if nonlin == 'relu':
            self.nonlinearity = nn.ReLU()
        if nonlin == 'modrelu':
            self.nonlinearity = modrelu(hid_size)
        elif nonlin == 'tanh':
            self.nonlinearity = nn.Tanh()
        elif nonlin == 'sigmoid':
            self.nonlinearity = nn.Sigmoid()
        else:
            self.nonlinearity = None
        # Create linear layer to act on input X
        self.U = nn.Linear(inp_size, hid_size, bias=bias)
        self.i_initializer = i_initializer
        self.r_initializer = r_initializer
        # determine if P is learnable, if P is learnable determine how
        self.log_P = torch.Tensor(hid_size, hid_size)
        self.log_P = nn.Parameter(self.log_P)
        self.P = torch.Tensor(hid_size, hid_size)
        self.P = nn.Parameter(self.P)
        self.gamma = nn.Parameter(torch.rand(slen))
        UppT = torch.zeros(hid_size, hid_size)
        self.UppT = UppT
        self.UppT = nn.Parameter(self.UppT)
        self.M = torch.triu(torch.ones_like(self.UppT), diagonal=1)
        self.D = torch.zeros_like(self.UppT)
        # Create rotations and mask M for *.3 and *.4
        self.thetas = [0]*int(hid_size)
        for i in range(0, len(self.thetas)):
            self.thetas[i] = nn.Parameter(torch.Tensor([np.random.uniform(0, 2*3.14)]))
            self.register_parameter('theta_{}'.format(i), self.thetas[i])
        self.alpha_crit = nn.MSELoss()
        self.alphas = [0]*int(hid_size/2)
        for i in range(0, len(self.alphas)):
            self.alphas[i] = nn.Parameter(torch.Tensor([np.random.uniform(1.00, 1.00)]))
            self.register_parameter('alpha_{}'.format(i), self.alphas[i])
        self.reset_parameters()
        # cudafy variables if needed
        if cuda:
            self.P.data = self.P.data.cuda()
            self.log_P.data = self.log_P.data.cuda()
            self.M = self.M.cuda()
            self.D = self.D.cuda()
            for item in self.thetas:
                item = item.cuda()
            for item in self.alphas:
                item = item.cuda()

    def reset_parameters(self):
        if self.r_initializer == random_orthogonal_init or \
           self.r_initializer == henaff_init or \
           self.r_initializer == cayley_init:
            self.P.data = self._B(
                torch.as_tensor(self.r_initializer(self.hidden_size), dtype=torch.float32))
        else:
            self.r_initializer(self.P.data)

    def _A(self, gradients=False):
        A = self.log_P
        if not gradients:
            A = A.data
        A = A.triu(diagonal=1)
        return A - A.t()

    def get_alpha(self):
        return self.gamma.clone().detach().cpu().numpy()

    def _norm(self):
        norm = 1
        if hasattr(self, 'rec'):
            norm = snorm.spectral_norm(self.rec.data)
        return norm

    def _B(self, gradients=False):
        return expm(self._A())

    def orthogonal_step(self, optimizer):
        A = self._A(False)
        B = self.P.data
        G = self.P.grad.data
        BtG = B.t().mm(G)
        grad = 0.5*(BtG - BtG.t())
        frechet_deriv = B.mm(expm_frechet(-A, grad))
        self.log_P.grad = (frechet_deriv - frechet_deriv.t()).triu(diagonal=1)
        optimizer.step()
        # def orth_complete(self):
        self.P.data = self._B(False)
        self.P.grad.data.zero_()

    def forward(self, x, hidden=None, i=1):
        if hidden is None:
            hidden = x.new_zeros(x.shape[0], self.hidden_size, requires_grad=True)
            self.first_hidden = hidden
        self.calc_rec()
        h = self.U(x) + torch.matmul(hidden, self.rec)
        if self.nonlinearity:
            h = (1 - torch.exp(-self.gamma[i] * h)) * self.nonlinearity(h)
        else:
            h = (1 - torch.exp(-self.gamma[i] * h)) * h
        return h

    def calc_rec(self):
        self.calc_D()
        self.calc_alpha_block()
        self.rec = torch.matmul(
            torch.matmul(self.P, torch.mul(self.UppT, self.M) + torch.mul(self.alpha_block, self.D)),
            self.P.t())

    def calc_D(self):
        self.D = torch.zeros_like(self.UppT)
        for i in range(0, self.hidden_size, 2):
            self.D[i, i] = self.thetas[int(i/2)].cos()
            self.D[i, i+1] = -self.thetas[int(i/2)].sin()
            self.D[i+1, i] = self.thetas[int(i/2)].sin()
            self.D[i+1, i+1] = self.thetas[int(i/2)].cos()

    def calc_alpha_block(self):
        self.alpha_block = torch.zeros_like(self.UppT)
        for i in range(0, self.hidden_size, 2):
            self.alpha_block[i, i] = self.alphas[int(i/2)]
            self.alpha_block[i+1, i] = self.alphas[int(i/2)]
            self.alpha_block[i, i+1] = self.alphas[int(i/2)]
            self.alpha_block[i+1, i+1] = self.alphas[int(i/2)]

    def alpha_loss(self, lam):
        reg_loss = 0
        for alph in range(len(self.alphas)):
            reg_loss += lam*self.alpha_crit(self.alphas[alph], torch.ones_like(self.alphas[alph]))
        return reg_loss
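A minimal smoke-test sketch for the plain `RNNCell` above, assuming the `expRNN` and `snorm` dependencies it imports are available on the path (the shapes are hypothetical, chosen only for illustration):

import torch

# from RNN_Cell import RNNCell  # assuming the module above is importable
cell = RNNCell(inp_size=10, hid_size=16, nonlin='tanh')
x = torch.randn(4, 10)   # a batch of 4 input vectors
h = cell(x)              # hidden state defaults to zeros on the first step
h = cell(x, h)           # feed the state back in for the next time step
print(h.shape)           # expected: torch.Size([4, 16])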
35.076517
140
0.578381
1,910
13,294
3.888482
0.097906
0.028275
0.038777
0.011849
0.877609
0.870473
0.870473
0.870473
0.860778
0.848391
0
0.012661
0.287047
13,294
378
141
35.169312
0.770943
0.044456
0
0.838926
0
0
0.008592
0
0
0
0
0
0
1
0.100671
false
0
0.02349
0.013423
0.187919
0.003356
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
51a998722d49fcabeb2cd37cb356eedeed72a45b
8,525
py
Python
test/regression/daily/ledger_lte.py
trust-tech/fabric
1cdbfc04fc9d65b58b13718b78585dae48c1b69c
[ "Apache-2.0" ]
null
null
null
test/regression/daily/ledger_lte.py
trust-tech/fabric
1cdbfc04fc9d65b58b13718b78585dae48c1b69c
[ "Apache-2.0" ]
null
null
null
test/regression/daily/ledger_lte.py
trust-tech/fabric
1cdbfc04fc9d65b58b13718b78585dae48c1b69c
[ "Apache-2.0" ]
null
null
null
import unittest
import subprocess


class perf_goleveldb(unittest.TestCase):

    def test_FAB_3790_VaryNumParallelTxPerChain(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of parallel
        transactions per chain and observe the performance.
        Passing criteria: all subtests (8) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumParallelTxPerChain",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 8)

    def test_FAB_3795_VaryNumChain(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of chains (ledgers).
        Passing criteria: all subtests (8) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumChain",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 8)

    def test_FAB_3798_VaryNumParallelTxWithSingleChain(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of parallel
        transactions on a single chain.
        Passing criteria: all subtests (8) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumParallelTxWithSingleChain",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 8)

    def test_FAB_3799_VaryNumChainWithNoParallelism(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of chains without
        any parallelism within a single chain.
        Passing criteria: all subtests (8) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumChainWithNoParallelism",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 8)

    def test_FAB_3801_VaryKVSize(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the size of key-value.
        Passing criteria: all subtests (5) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyKVSize",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 5)

    def test_FAB_3802_VaryBatchSize(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the value of the batch size.
        Passing criteria: all subtests (4) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyBatchSize",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 4)

    def test_FAB_3800_VaryNumKeysInEachTX(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of keys in each
        transaction.
        Passing criteria: all subtests (5) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumKeysInEachTX",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 5)

    def test_FAB_3803_VaryNumTxs(self):
        '''
        In this Performance test, we observe the performance (time to complete
        a set number of Ledger operations) of the Ledger component, with
        goleveldb as the state database. We vary the number of transactions
        carried out.
        Passing criteria: all subtests (4) completed successfully
        '''
        result = subprocess.check_output(
            "./runbenchmarks.sh varyNumTxs",
            shell=True, stderr=subprocess.STDOUT,
            cwd='../../tools/LTE/scripts')
        completion_count = result.count("PASS")
        self.assertEqual(completion_count, 4)


class perf_couchdb(unittest.TestCase):

    @unittest.skip("WIP, skipping")
    def test_FAB_3870_VaryNumParallelTxPerChain(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of parallel transactions per chain.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3871_VaryNumChain(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of chains (ledgers).
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3872_VaryNumParallelTxWithSingleChain(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of parallel transactions on a single chain.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3873_VaryNumChainWithNoParallelism(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of chains without any parallelism within a
        single chain.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3874_VaryKVSize(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        varying the size of key-value.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3875_VaryBatchSize(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the value of the batch size.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3876_VaryNumKeysInEachTX(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of keys in each transaction.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3877_VaryNumTxs(self):
        '''
        In this Performance test, we observe the performance (operations per
        second) of the Ledger component, with CouchDB as the state database,
        as we vary the number of transactions carried out.
        '''
        self.assertTrue(True)
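These tests shell out to `runbenchmarks.sh`, so they only pass from inside a fabric checkout with the LTE tooling present. A hedged sketch of loading just the goleveldb class programmatically with the standard `unittest` loader (this assumes the file above is importable as the module name `ledger_lte`):

import unittest

# Load and run only the goleveldb performance class defined above.
suite = unittest.defaultTestLoader.loadTestsFromName('ledger_lte.perf_goleveldb')
unittest.TextTestRunner(verbosity=2).run(suite)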
41.183575
78
0.644575
966
8,525
5.611801
0.114907
0.029515
0.065855
0.061981
0.930087
0.924368
0.924368
0.915698
0.900572
0.887106
0
0.013027
0.279648
8,525
206
79
41.383495
0.869728
0.44176
0
0.666667
0
0
0.157788
0.068493
0
0
0
0
0.190476
1
0.190476
false
0.095238
0.02381
0
0.238095
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
51de4de9bdeb9237862aca0ba8f95f0aafcf1aeb
132
py
Python
viz/__init__.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
3
2021-06-23T10:27:11.000Z
2022-02-09T07:50:42.000Z
viz/__init__.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
3
2021-06-23T09:36:29.000Z
2022-01-13T03:38:16.000Z
viz/__init__.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
null
null
null
from viz.backtesting import *
from viz.data import *
from viz.fit import *
from viz.forecast import *
from viz.uncertainty import *
22
29
0.772727
20
132
5.1
0.4
0.343137
0.509804
0
0
0
0
0
0
0
0
0
0.151515
132
5
30
26.4
0.910714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
51f2317764cfa41342dda3b67e0ccd71d4403954
4,272
py
Python
tests/read_only_tests.py
Don42/strichliste-django
0e7e005de7bf4852c544710f5cb8fdc6d0790763
[ "MIT" ]
1
2016-02-24T07:46:20.000Z
2016-02-24T07:46:20.000Z
tests/read_only_tests.py
hackerspace-bootstrap/strichliste-django
0e7e005de7bf4852c544710f5cb8fdc6d0790763
[ "MIT" ]
2
2016-04-17T22:06:26.000Z
2016-07-26T21:20:48.000Z
tests/read_only_tests.py
Don42/strichliste-django
0e7e005de7bf4852c544710f5cb8fdc6d0790763
[ "MIT" ]
1
2017-01-04T12:54:35.000Z
2017-01-04T12:54:35.000Z
import json
import requests
import unittest

URL = ("http://", "127.0.0.1", ":", "8000", "/")
HEADERS = {'Content-Type': 'application/json'}


class EmptyTests(unittest.TestCase):

    def setUp(self):
        requests.get(''.join(URL + ('debug/clear/',)))

    def test_empty_user_list(self):
        r = requests.get(''.join(URL + ('user',)))
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        users = json.loads(r.text)
        assert {'overall_count', 'limit', 'offset', 'entries'}.issubset(users)
        assert users['overall_count'] == 0
        assert users['limit'] == 100
        assert users['offset'] == 0
        assert users['entries'] == []

    def test_user_not_found(self):
        r = requests.get(''.join(URL + ('user', '/', '10')))
        self.assertEqual(404, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        result = json.loads(r.text)
        self.assertEqual("user 10 not found", result.get('msg'))

    def test_user_not_found_transactions(self):
        r = requests.get(''.join(URL + ('user', '/', '10', '/', 'transaction')))
        self.assertEqual(404, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        result = json.loads(r.text)
        self.assertEqual("user 10 not found", result.get('msg'))

    def test_empty_transactions(self):
        r = requests.get(''.join((URL + ('transaction',))))
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        transactions = json.loads(r.text)
        self.assertEqual(0, transactions.get('overall_count'))
        self.assertEqual(100, transactions.get('limit'))
        self.assertEqual(0, transactions.get('offset'))
        self.assertEqual([], transactions.get('entries'))

    def test_user_limits(self):
        r = requests.get(''.join(URL + ('user',)), headers=HEADERS,
                         params={'offset': 1, 'limit': 1})
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        transactions = json.loads(r.text)
        self.assertEqual(0, transactions.get('overall_count'))
        self.assertEqual(1, transactions.get('limit'))
        self.assertEqual(1, transactions.get('offset'))
        self.assertEqual([], transactions.get('entries'))

    def test_user_limits_max(self):
        r = requests.get(''.join(URL + ('user',)), headers=HEADERS,
                         params={'offset': 0, 'limit': 1000})
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        transactions = json.loads(r.text)
        self.assertEqual(0, transactions.get('overall_count'))
        self.assertEqual(250, transactions.get('limit'))  # 250 is the max
        self.assertEqual(0, transactions.get('offset'))
        self.assertEqual([], transactions.get('entries'))

    def test_transaction_limits(self):
        r = requests.get(''.join(URL + ('transaction',)), headers=HEADERS,
                         params={'offset': 1, 'limit': 1})
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        transactions = json.loads(r.text)
        self.assertEqual(0, transactions.get('overall_count'))
        self.assertEqual(1, transactions.get('limit'))
        self.assertEqual(1, transactions.get('offset'))
        self.assertEqual([], transactions.get('entries'))

    def test_transaction_limits_max(self):
        r = requests.get(''.join(URL + ('transaction',)), headers=HEADERS,
                         params={'offset': 0, 'limit': 1000})
        self.assertEqual(200, r.status_code)
        self.assertEqual('application/json', r.headers['Content-Type'])
        transactions = json.loads(r.text)
        self.assertEqual(0, transactions.get('overall_count'))
        self.assertEqual(250, transactions.get('limit'))  # 250 is the max
        self.assertEqual(0, transactions.get('offset'))
        self.assertEqual([], transactions.get('entries'))
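The URL-building idiom above concatenates a tuple of parts and joins them with an empty separator. A short sketch making the resulting strings explicit, using the same `URL` constant as the tests:

URL = ("http://", "127.0.0.1", ":", "8000", "/")

print(''.join(URL + ('user',)))              # http://127.0.0.1:8000/user
print(''.join(URL + ('user', '/', '10')))    # http://127.0.0.1:8000/user/10
print(''.join(URL + ('user', '/', '10', '/', 'transaction')))
# http://127.0.0.1:8000/user/10/transaction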
44.5
80
0.604869
478
4,272
5.328452
0.123431
0.223793
0.063604
0.063604
0.864547
0.844523
0.844523
0.81115
0.786023
0.786023
0
0.026675
0.227762
4,272
95
81
44.968421
0.745377
0.006788
0
0.674699
0
0
0.155189
0
0
0
0
0
0.518072
1
0.108434
false
0
0.036145
0
0.156627
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
cfd249386e501c864682f04b5c7390c246a5d3a5
7,758
py
Python
tests/test_smith_waterman_gotoh.py
jiayingwang/smart-match
3f273aafce87dc1a97da20baa49b3adceaf53979
[ "MIT" ]
14
2020-09-27T10:20:00.000Z
2022-01-08T19:12:29.000Z
tests/test_smith_waterman_gotoh.py
jiayingwang/smart-match
3f273aafce87dc1a97da20baa49b3adceaf53979
[ "MIT" ]
5
2020-09-28T13:45:13.000Z
2020-12-08T09:24:49.000Z
tests/test_smith_waterman_gotoh.py
jiayingwang/smart-match
3f273aafce87dc1a97da20baa49b3adceaf53979
[ "MIT" ]
19
2020-09-25T12:36:09.000Z
2021-08-16T09:30:48.000Z
import unittest

import smart_match


class TestSmithWatermanGotoh(unittest.TestCase):

    def setUp(self):
        smart_match.use('Smith Waterman Gotoh')

    def test_similarity(self):
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("test string1", "test string2")), 0.9167)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("test", "test string2")), 1.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("", "test string2")), 0.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("aaa bbb ccc ddd", "aaa bbb ccc eee")), 0.8000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("a b c d", "a b c e")), 0.8571)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Healed", "Sealed")), 0.8333)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Healed", "Healthy")), 0.6667)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Healed", "Heard")), 0.6000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Healed", "Herded")), 0.3333)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Sam J Chapman", "Samuel John Chapman")), 0.7692)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Sam Chapman", "S Chapman")), 0.8889)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("John Smith", "Samuel John Chapman")), 0.5000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("aaa bbb ccc ddd", "aaa bbb ccc eee")), 0.8000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("John Smith", "Sam Chapman")), 0.1500)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("John Smith", "S Chapman")), 0.1111)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "Web Database Applications with PHP & MySQL")), 1.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "Building Web Database Applications with Visual Studio 6")), 1.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "Web Application Development With PHP")), 0.5000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "WebRAD: Building Database Applications on the Web with Visual FoxPro and Web Connection")), 0.8800)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "Structural Assessment: The Role of Large and Full-Scale Testing")), 0.1000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Database Applications", "How to Find a Scholarship Online")), 0.0800)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Aplications", "Web Database Applications with PHP & MySQL")), 0.8000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Aplications", "Creating Database Web Applications with PHP and ASP")), 0.9667)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Aplications", "Web Application Development With PHP")), 0.9000)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Aplications", "Structural Assessment: The Role of Large and Full-Scale Testing")), 0.1667)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity("Web Aplications", "How to Find a Scholarship Online")), 0.1333)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity('GGTTGACTA', 'TGTTACGG')), 0.5625)
        smart_match.set_params(gap=-2, match=3, mismatch=-3)
        self.assertAlmostEqual(float('%.4f' % smart_match.similarity('GGTTGACTA', 'TGTTACGG')), 0.5417)

    def test_dissimilarity(self):
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("test string1", "test string2")), 0.0833)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("test", "test string2")), 0.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("", "test string2")), 1.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("aaa bbb ccc ddd", "aaa bbb ccc eee")), 0.2000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("a b c d", "a b c e")), 0.1429)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Healed", "Sealed")), 0.1667)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Healed", "Healthy")), 0.3333)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Healed", "Heard")), 0.4000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Healed", "Herded")), 0.6667)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Sam J Chapman", "Samuel John Chapman")), 0.2308)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Sam Chapman", "S Chapman")), 0.1111)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("John Smith", "Samuel John Chapman")), 0.5000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("aaa bbb ccc ddd", "aaa bbb ccc eee")), 0.2000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("John Smith", "Sam Chapman")), 0.8500)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("John Smith", "S Chapman")), 0.8889)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "Web Database Applications with PHP & MySQL")), 0.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "Building Web Database Applications with Visual Studio 6")), 0.0000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "Web Application Development With PHP")), 0.5000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "WebRAD: Building Database Applications on the Web with Visual FoxPro and Web Connection")), 0.1200)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "Structural Assessment: The Role of Large and Full-Scale Testing")), 0.9000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Database Applications", "How to Find a Scholarship Online")), 0.9200)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Aplications", "Web Database Applications with PHP & MySQL")), 0.2000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Aplications", "Creating Database Web Applications with PHP and ASP")), 0.0333)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Aplications", "Web Application Development With PHP")), 0.1000)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Aplications", "Structural Assessment: The Role of Large and Full-Scale Testing")), 0.8333)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity("Web Aplications", "How to Find a Scholarship Online")), 0.8667)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity('GGTTGACTA', 'TGTTACGG')), 0.4375)
        smart_match.set_params(gap=-2, match=3, mismatch=-3)
        self.assertAlmostEqual(float('%.4f' % smart_match.dissimilarity('GGTTGACTA', 'TGTTACGG')), 0.4583)


if __name__ == '__main__':
    unittest.main()
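Every dissimilarity expectation above is the complement of the corresponding similarity expectation (0.9167 vs 0.0833, 0.8000 vs 0.2000, and so on). A hedged one-liner capturing that apparent invariant with the same library calls used in the tests (inferred from the expected values, not from smart-match documentation):

import smart_match

smart_match.use('Smith Waterman Gotoh')
s = smart_match.similarity('test string1', 'test string2')
d = smart_match.dissimilarity('test string1', 'test string2')
assert abs((s + d) - 1.0) < 1e-9  # dissimilarity appears to be 1 - similarity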
89.172414
197
0.684068
913
7,758
5.733844
0.135816
0.114613
0.278128
0.299522
0.94021
0.926075
0.926075
0.914613
0.72703
0.589876
0
0.054262
0.163831
7,758
86
198
90.209302
0.752736
0
0
0.105263
0
0
0.314256
0
0
0
0
0
0.736842
1
0.039474
false
0
0.026316
0
0.078947
0
0
0
0
null
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
5c79b176670938ed0bf4639ee3805c4b17604919
41,888
py
Python
model_description.py
akshitac8/keras_aud
29de5c30992726f8fda64f03e6559263eb79328d
[ "MIT" ]
7
2018-05-27T16:38:57.000Z
2019-09-20T05:54:30.000Z
model_description.py
akshitac8/keras_aud
29de5c30992726f8fda64f03e6559263eb79328d
[ "MIT" ]
null
null
null
model_description.py
akshitac8/keras_aud
29de5c30992726f8fda64f03e6559263eb79328d
[ "MIT" ]
3
2018-05-22T13:13:51.000Z
2019-06-05T15:36:18.000Z
""" Created on Sat Apr 08 11:48:18 2018 author: @akshitac8 """ from keras.models import Model from keras.layers import Dense, Dropout, Flatten, Input from keras.layers import Conv2D, Conv2DTranspose, merge, Merge from keras.layers import BatchNormalization, Lambda,Activation,Concatenate,RepeatVector,dot from keras.layers import LSTM, GRU, Reshape, Bidirectional, Permute,TimeDistributed from keras.layers import MaxPooling2D, AveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalAveragePooling2D from keras.layers.merge import Multiply from keras import backend as K import numpy as np ############################# Keras shape ############################# def kr(t,m=None): if m is None: return t._keras_shape else: return t._keras_shape[m] ###########################FUNCTIONAL MODELS############################################# ########################### BASIC DNN ################################# def dnn(dimx,dimy,num_classes,**kwargs): """ Deep Neural Network containing 3 Dense layers each followed by a Dropout. Parameters ---------- input_neurons : int default : 200 Number of Neurons for each Dense layer. dropout : float default : 0.1 Dropout used after each Dense Layer. act1 : str default : relu Activation used after 1st layer. act2 : str default : relu Activation used after 2nd layer. act3 : str default : relu Activation used after 3rd layer. act4 : str default : softmax Activation used after 4th layer. print_sum : bool default : False Print summary if the model loss default : categorical_crossentropy Loss used optimizer default : adam Optimizer used metrics default : accuracy Metrics used. Returns ------- DNN Model """ input_neurons = kwargs['kwargs'].get('input_neurons',200) dropout = kwargs['kwargs'].get('dropout',0.1) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','relu') act3 = kwargs['kwargs'].get('act3','relu') act4 = kwargs['kwargs'].get('act4','softmax') print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','binary_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') print "Model DNN" print "Activation 1 {} 2 {} 3 {} 4 {}".format(act1,act2,act3,act4) print "Neurons {} Dropout {}".format(input_neurons,dropout) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) input_dim = dimx * dimy inpx = Input(shape=(input_dim,)) x = Dense(input_neurons, activation=act1)(inpx) x = Dropout(dropout)(x) x = Dense(input_neurons, activation=act2)(x) x = Dropout(dropout)(x) x = Dense(input_neurons, activation=act3)(x) x = Dropout(dropout)(x) score = Dense(num_classes, activation=act4)(x) model = Model([inpx],score) if print_sum: model.summary() model.compile(loss=loss,optimizer=optimizer,metrics=[metrics]) return model ########################### BASIC CNN ################################# def cnn(dimx,dimy,num_classes,**kwargs): """ Convolution Neural Network containing 1 Convolution layer followed by a Dense Layer. Parameters ---------- input_neurons : int default : 200 Number of Neurons for the Dense layer. dropout : float default : 0.1 Dropout used after the Dense Layer. act1 : str default : relu Activation used after 1st Convolution layer. act2 : str default : relu Activation used after 1st Dense layer. act3 : str default : softmax Activation used after last Dense layer. 
print_sum : bool default : False Print summary if the model nb_filter : int default : 100 Number of kernels filter_length : int, tuple default : 3 Size of kernels pool_size : int, tuple default : (2,2) Pooling size. loss default : categorical_crossentropy Loss used optimizer default : adam Optimizer used metrics default : accuracy Metrics used. Returns ------- CNN Model """ input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','relu') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = kwargs['kwargs'].get('nb_filter',128) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',2) print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') if type(nb_filter) is int: nb_filter = [nb_filter] * 2 if type(pool_size) is int: pool_size = [pool_size] * 2 print "Model CNN" print "Activation 1 {} 2 {} 3 {}".format(act1,act2,act3) print "Neurons {} Dropout {}".format(input_neurons,dropout) print "Kernels {} Size {} Poolsize {}".format(nb_filter,filter_length,pool_size) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) inpx = Input(shape=(1,dimx,dimy),name='inpx') x = Conv2D(filters=nb_filter[0], kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx) hx = MaxPooling2D(pool_size=pool_size[0])(x) x = Conv2D(filters=nb_filter[1], kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx) hx = MaxPooling2D(pool_size=pool_size[1])(x) h = Flatten()(hx) wrap = Dense(input_neurons, activation=act2,name='wrap')(h) wrap= Dropout(dropout)(wrap) score = Dense(num_classes,activation=act3,name='score')(wrap) model = Model([inpx],score) if print_sum: model.summary() model.compile(loss=loss,optimizer=optimizer,metrics=[metrics]) return model ########################### BASIC RNN ################################# def rnn(dimx,dimy,num_classes,**kwargs): """ Deep Neural Network containing 1 LSTM layers followed by 3 Dense Layers. Parameters ---------- rnn_units : int default : 32 Number of Units for LSTM layer. input_neurons : int default : 200 Number of Neurons for each Dense layer. act1 : str default : relu Activation used after 1st layer. act2 : str default : relu Activation used after 2nd layer. act3 : str default : relu Activation used after 3rd layer. act4 : str default : softmax Activation used after 4th layer. print_sum : bool default : False Print summary if the model loss default : categorical_crossentropy Loss used optimizer default : adam Optimizer used metrics default : accuracy Metrics used. 
Returns ------- RNN Model """ rnn_units = kwargs['kwargs'].get('rnn_units',32) input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','relu') act3 = kwargs['kwargs'].get('act3','relu') act4 = kwargs['kwargs'].get('act4','sigmoid') print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') input_dim=dimx*dimy main_input = Input(shape=(1,input_dim), name='main_input') x = LSTM(rnn_units)(main_input) # We stack a deep densely-connected network on top x = Dense(input_neurons, activation=act1)(x) x = Dense(input_neurons, activation=act2)(x) x = Dense(input_neurons, activation=act3)(x) # And finally we add the main logistic regression layer main_output = Dense(num_classes, activation=act4, name='main_output')(x) model = Model(inputs=main_input, outputs=main_output) if print_sum: model.summary() model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ########################### BASIC CRNN ################################# def cnn_rnn(dimx,dimy,num_classes,**kwargs): """ Deep Neural Network containing 1 LSTM layers followed by 3 Dense Layers. Parameters ---------- rnn_units : int default : 32 Number of Units for LSTM layer. input_neurons : int default : 200 Number of Neurons for each Dense layer. dropout : float default : 0.1 Dropout used after the Dense Layer. act1 : str default : relu Activation used after Convolution layer. act2 : str default : tanh Activation used after Recurrent layer. act3 : str default : softmax Activation used after Dense layer. print_sum : bool default : False Print summary if the model nb_filter : int default : 100 Number of kernels filter_length : int, tuple default : 3 Size of kernels pool_size : int, tuple default : (2,2) Pooling size. loss default : categorical_crossentropy Loss used optimizer default : adam Optimizer used metrics default : accuracy Metrics used. 
Returns ------- RNN Model """ rnn_units = kwargs['kwargs'].get('rnn_units',32) input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','tanh') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = kwargs['kwargs'].get('nb_filter',100) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',(2,2)) print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') main_input = Input(shape=(1,dimx,dimy)) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(main_input) hx = MaxPooling2D(pool_size=pool_size)(x) wrap= Dropout(dropout)(hx) x = Permute((2,1,3))(wrap) a,b,c,d= kr(x) x = Reshape((b*d,c))(x) x = LSTM(rnn_units,activation=act2)(x) wrap= Dropout(dropout)(x) x = Dense(input_neurons, activation=act3)(wrap) main_output = Dense(num_classes, activation='softmax', name='main_output')(wrap) model = Model(inputs=main_input, outputs=main_output) if print_sum: model.summary() model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ############################# BASIC CBRNN ############################# def cbrnn(dimx,dimy,num_classes,**kwargs): """ CNN with biderectional lstm Parameters ---------- rnn_units : int default : 32 Number of Units for LSTM layer. dropout : float default : 0.1 Dropout used after the Dense Layer. act1 : str default : relu Activation used after 4 Convolution layers. act2 : str default : sigmoid Activation used after Recurrent layer. act3 : str default : sigmoid Activation used after Dense layer. print_sum : bool default : False Print summary if the model nb_filter : int default : 100 Number of kernels filter_length : int, tuple default : 3 Size of kernels pool_size : int, tuple default : (2,2) Pooling size. loss default : categorical_crossentropy Loss used optimizer default : adam Optimizer used metrics default : accuracy Metrics used. 
Returns ------- CBRNN Model """ rnn_units = kwargs['kwargs'].get('rnn_units',32) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','sigmoid') act3 = kwargs['kwargs'].get('act3','sigmoid') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = kwargs['kwargs'].get('nb_filter',100) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',(2,2)) print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','binary_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','mse') print "Functional CBRNN" print "Activation 1 {} 2 {} 3 {}".format(act1,act2,act3) print "Dropout {}".format(dropout) print "Kernels {} Size {} Poolsize {}".format(nb_filter,filter_length,pool_size) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) main_input = Input(shape=(1,dimx,dimy)) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(main_input) #x1=BatchNormalization()(x) hx = MaxPooling2D(pool_size=pool_size)(x) # wrap= Dropout(dropout)(hx) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(hx) #x2=BatchNormalization()(x) hx = MaxPooling2D(pool_size=pool_size)(x) # wrap= Dropout(dropout)(hx) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(hx) #x3=BatchNormalization()(x) hx = MaxPooling2D(pool_size=(2,2))(x) # wrap= Dropout(dropout)(hx) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(hx) # x4=BatchNormalization()(x) hx = MaxPooling2D(pool_size=(1,1))(x) wrap= Dropout(dropout)(x) x = Permute((2,1,3))(wrap) a,b,c,d= kr(x) x = Reshape((b*d,c))(x) # x = Reshape((c*d,b))(x) # w = Bidirectional(LSTM(rnn_units,activation=act2,return_sequences=False))(x) rnnout = Bidirectional(LSTM(rnn_units, activation=act2, return_sequences=True))(x) rnnout_gate = Bidirectional(LSTM(rnn_units, activation=act3, return_sequences=False))(x) w = Multiply()([rnnout, rnnout_gate]) wrap= Dropout(dropout)(w) wrap=Flatten()(wrap) main_output = Dense(num_classes, activation=act3, name='main_output')(wrap) model = Model(inputs=main_input, outputs=main_output) if print_sum: model.summary() model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ############################ Multi CNN : Ensemble model combining different features ################################ def multi_cnn(dimx,dimy,num_classes,**kwargs): """ This model is used to combine same or complementary features through a mini ensemble convolution model based on their properties. 
""" input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','tanh') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = kwargs['kwargs'].get('nb_filter',100) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',(2,2)) print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') inps,outs=[],[] for i in range(len(dimy)): inpx = Input(shape=(1,dimx,dimy[i])) inps.append(inpx) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx) x = MaxPooling2D(pool_size=pool_size)(x) x= Dropout(dropout)(x) h = Flatten()(x) outs.append(h) combine = Merge(mode='concat')(outs) # And finally we add the main logistic regression layer wrap = Dense(input_neurons, activation=act2,name='wrap')(combine) main_output = Dense(num_classes,activation=act3,name='score')(wrap) model = Model(inputs=inps,outputs=main_output) if print_sum: model.summary() model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ############################ Transpose CNN ################################ def transpose_cnn(dimx,dimy,num_classes,**kwargs): """ The first section of the neural network contains conv layers. The deconv layer after conv layer maintains the same shape. The last layer will be a conv layer to calculate class wise score. Emphasis is given to check the size parameter for model. This is used for acoustic event detection. """ act1 = kwargs['kwargs'].get('act1','tanh') act2 = kwargs['kwargs'].get('act2','tanh') act3 = kwargs['kwargs'].get('act3','sigmoid') nb_filter = kwargs['kwargs'].get('nb_filter',128) pool_size = kwargs['kwargs'].get('pool_size',(1,2)) dropout = kwargs['kwargs'].get('dropout',0.1) print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','binary_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','mse') if type(nb_filter) is int: nb_filter = [nb_filter] * 2 inpx = Input(shape=(1,dimx,dimy),name='inpx') x = Conv2D(filters=nb_filter[0], kernel_size=5, data_format='channels_first', padding='same', activation=act1)(inpx) hx = MaxPooling2D(pool_size=pool_size)(x) #hx = ZeroPadding2D(padding=(2, 1))(hx) hx = Conv2D(filters=nb_filter[1], kernel_size=3, data_format='channels_first', padding='same', activation=act1)(hx) x=Conv2DTranspose(filters=nb_filter[1], kernel_size=3,padding='same', data_format='channels_first',activation=act2)(hx) hx = MaxPooling2D(pool_size=pool_size)(x) x=Conv2DTranspose(filters=nb_filter[0], kernel_size=5,padding='same', data_format='channels_first',activation=act2)(hx) hx = MaxPooling2D(pool_size=pool_size)(x) # Don't use softmax in last layer score=Conv2D(filters=num_classes, kernel_size=(1,1),padding='same', data_format='channels_first',activation=act3)(hx) # Check for compiling # wrap= Dropout(dropout)(score) score=GlobalAveragePooling2D(data_format='channels_first')(score) kr(score) model = Model(inputs=[inpx], outputs=[score]) if print_sum: model.summary() model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ##################### Sequence2Sequence Model ############################ def seq2seq(dimx,dimy,num_classes,**kwargs): # Recurrent sequence 
to sequence learning auto encoders for audio classification task print "seq2seq_lstm" ## encoder x = Input(shape=(dimx,dimy)) # Encoder encoder=Bidirectional(LSTM(32,return_state=True)) encoder_outputs, forward_h, forward_c, backward_h, backward_c = encoder(x) state_h = Concatenate(axis=1)([forward_h, backward_h]) state_c = Concatenate(axis=1)([forward_c, backward_c]) encoder_states = [state_h, state_c] #hidden_1 = Dense(128, activation='relu')(encoder_outputs) #h = Dense(64, activation='relu')(hidden_1) # Decoder y = Input(shape=(dimx,dimy)) decoder_lstm = LSTM(64, return_sequences=True, return_state=True) decoder_outputs, _, _ = decoder_lstm(y, initial_state=encoder_states) #hidden_2 = Dense(128, activation='relu')(decoder_outputs) r = Dense(40, activation='sigmoid')(decoder_outputs) model = Model([x,y], r) model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy']) return model ####################### ATTENTION MODEL ACRNN ################################## def ACRNN(dimx,dimy,num_classes,**kwargs): act1 = kwargs['kwargs'].get('act1','tanh') nb_filter = kwargs['kwargs'].get('nb_filter',72) filter_length = kwargs['kwargs'].get('filter_length',4) act2 = kwargs['kwargs'].get('act2','linear') rnn_units = kwargs['kwargs'].get('rnn_units',[20,20]) dropout = kwargs['kwargs'].get('dropout',[0.1,0.2]) act3 = kwargs['kwargs'].get('act3','softmax') print_sum = kwargs['kwargs'].get('print_sum',False) loss = kwargs['kwargs'].get('loss','binary_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','mse') #input shape main_input = Input(shape=(1,dimx,dimy)) #CNN x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(main_input) hx = MaxPooling2D(pool_size=(1,2))(x) x = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1,use_bias=False)(hx) hx = MaxPooling2D(pool_size=(1,2))(x) wrap= Dropout(dropout[0])(hx) x = Permute((2,1,3))(wrap) a,b,c,d= kr(x) x = Reshape((b*d,c))(x) #RNN LAYERS rnnout = Bidirectional(GRU(rnn_units[0],activation=act2, return_sequences=True),merge_mode='concat')(x) rnnout_1 = Bidirectional(GRU(rnn_units[1],activation='sigmoid', return_sequences=True),merge_mode='concat')(rnnout) w = Multiply()([rnnout, rnnout_1]) #Attention starts hidden_size = int(w._keras_shape[1]) a = Permute((2, 1))(w) a = Reshape((hidden_size, a._keras_shape[1]))(a) a = TimeDistributed(Dense( a._keras_shape[1], activation='softmax',use_bias=False))(a) a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a) a = RepeatVector(dimy)(a) a_probs = Permute((2, 1), name='attention_vec')(a) attention_mul = merge([w,a_probs], name='attention_mul', mode='mul') attention_mul = GlobalMaxPooling1D()(attention_mul) attention_mul = Dropout(dropout[1])(attention_mul) # compile Model main_output = Dense(num_classes, activation=act3)(attention_mul) mymodel = Model([main_input], main_output) if print_sum: mymodel.summary() mymodel.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return mymodel ''' Architectures with more than one feature and models ''' def ensemble_cnn(dimx0,dimy0,dimx1,dimy1,num_classes,**kwargs): print "No Dropout used" input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','sigmoid') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = 
kwargs['kwargs'].get('nb_filter',128) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',2) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','mse') if type(filter_length) is int: filter_length = [filter_length] * 2 if type(pool_size) is int: pool_size = [pool_size] * 2 print "Model ensemble CNN" print "Activation 1 {} 2 {} 3 {}".format(act1,act2,act3) print "Neurons {} Dropout {}".format(input_neurons,dropout) print "Kernels {} Size {} Poolsize {}".format(nb_filter,filter_length,pool_size) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) inpx0 = Input(shape=(1,dimx0,dimy0),name='inpx0') inpx1 = Input(shape=(1,dimx1,dimy1),name='inpx1') """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" x0 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx0) x1 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx0 = MaxPooling2D(pool_size=pool_size)(x0) hx1 = MaxPooling2D(pool_size=pool_size)(x1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" x1 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(hx0) x2 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(hx1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx1 = MaxPooling2D(pool_size=pool_size)(x1) hx2 = MaxPooling2D(pool_size=pool_size)(x2) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" h0 = Flatten()(hx1) h1 = Flatten()(hx2) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" combine = Concatenate()([h0,h1]) wrap = Dense(input_neurons, activation=act2,name='wrap')(combine) score = Dense(num_classes,activation=act3,name='score')(wrap) model = Model([inpx0,inpx1],score) # sgd_x = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # op_x = optimizers.Adagrad() # print "Adagrad optimiser used" model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model def ensemble_rnn(dimx0,dimy0,dimx1,dimy1,num_classes,**kwargs): print "No Dropout used" rnn_units = kwargs['kwargs'].get('rnn_units',32) input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','sigmoid') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) pool_size = kwargs['kwargs'].get('pool_size',2) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') if type(pool_size) is int: pool_size = [pool_size] * 2 print "Model Ensemble RNN" print "Activation 1 {} 2 {} 3 {}".format(act1,act2,act3) print "Neurons {} Dropout {}".format(input_neurons,dropout) print "Poolsize {}".format(pool_size) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) inpx0 = Input(shape=(1,dimx0,dimy0),name='inpx0') inpx1 = Input(shape=(1,dimx1,dimy1),name='inpx1') 
""""""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" x0 = LSTM(rnn_units, activation='relu',return_sequences=True, unroll=True)(inpx0) x1 = LSTM(rnn_units, activation='sigmoid',return_sequences=True, unroll=True)(inpx1) x2 = LSTM(rnn_units, activation='relu',return_sequences=False, unroll=True)(x0) x3 = LSTM(rnn_units, activation='relu',return_sequences=False, unroll=True)(x1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" h0 = Dense(input_neurons, activation=act1)(x2) h1 = Dense(input_neurons, activation=act2)(x3) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" combine = Concatenate()([h0,h1]) wrap = Dense(input_neurons, activation=act2,name='wrap')(combine) score = Dense(num_classes,activation=act3,name='score')(wrap) model = Model([inpx0,inpx1],score) # sgd_x = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # op_x = optimizers.Adagrad() # print "Adagrad optimiser used" model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model def ensemble_cnn_rnn(dimx0,dimy0,dimx1,dimy1,num_classes,**kwargs): print "No Dropout used" rnn_units = kwargs['kwargs'].get('rnn_units',32) input_neurons = kwargs['kwargs'].get('input_neurons',200) act1 = kwargs['kwargs'].get('act1','relu') act2 = kwargs['kwargs'].get('act2','sigmoid') act3 = kwargs['kwargs'].get('act3','softmax') dropout = kwargs['kwargs'].get('dropout',0.1) nb_filter = kwargs['kwargs'].get('nb_filter',100) filter_length = kwargs['kwargs'].get('filter_length',3) pool_size = kwargs['kwargs'].get('pool_size',2) loss = kwargs['kwargs'].get('loss','categorical_crossentropy') optimizer = kwargs['kwargs'].get('optimizer','adam') metrics = kwargs['kwargs'].get('metrics','accuracy') if type(filter_length) is int: filter_length = [filter_length] * 2 if type(pool_size) is int: pool_size = [pool_size] * 2 print "Model Ensemble RNN" print "Activation 1 {} 2 {} 3 {}".format(act1,act2,act3) print "Neurons {} Dropout {}".format(input_neurons,dropout) print "Kernels {} Size {} Poolsize {}".format(nb_filter,filter_length,pool_size) print "Loss {} Optimizer {} Metrics {}".format(loss,optimizer,metrics) inpx0 = Input(shape=(1,dimx0,dimy0),name='inpx0') inpx1 = Input(shape=(1,dimx1,dimy1),name='inpx1') """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" x0 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx0) x1 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(inpx1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx0 = MaxPooling2D(pool_size=pool_size)(x0) hx1 = MaxPooling2D(pool_size=pool_size)(x1) wrap0= Dropout(dropout)(hx0) wrap1= Dropout(dropout)(hx1) x2 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap0) x3 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx2 = MaxPooling2D(pool_size=pool_size)(x2) hx3 = MaxPooling2D(pool_size=pool_size)(x3) wrap2= Dropout(dropout)(hx2) wrap3= Dropout(dropout)(hx3) x4 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap2) x5 = 
Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap3) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx4 = MaxPooling2D(pool_size=pool_size)(x4) hx5 = MaxPooling2D(pool_size=pool_size)(x5) wrap4= Dropout(dropout)(hx4) wrap5= Dropout(dropout)(hx5) x6 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap4) x7 = Conv2D(filters=nb_filter, kernel_size=filter_length, data_format='channels_first', padding='same', activation=act1)(wrap5) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" hx6 = MaxPooling2D(pool_size=pool_size)(x6) hx7 = MaxPooling2D(pool_size=pool_size)(x7) wrap6= Dropout(dropout)(hx6) wrap7= Dropout(dropout)(hx7) x8 = Permute((2,1,3))(wrap6) a,b,c,d= kr(x8) x8 = Reshape((b*d,c))(x8) x9 = Permute((2,1,3))(wrap7) a,b,c,d= kr(x9) x9 = Reshape((b*d,c))(x9) x_0 = LSTM(rnn_units, activation='relu',return_sequences=True)(x8) x_1 = LSTM(rnn_units, activation='sigmoid',return_sequences=True)(x9) x_2 = LSTM(rnn_units, activation='relu',return_sequences=False)(x_0) x_3 = LSTM(rnn_units, activation='relu',return_sequences=False)(x_1) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" h0 = Dense(input_neurons, activation=act1)(x_2) h1 = Dense(input_neurons, activation=act2)(x_3) """"""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""" combine = Concatenate()([h0,h1]) wrap = Dense(input_neurons, activation=act2,name='wrap')(combine) score = Dense(num_classes,activation=act3,name='score')(wrap) model = Model([inpx0,inpx1],score) # sgd_x = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # op_x = optimizers.Adagrad() # print "Adagrad optimiser used" model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) return model ########################################### DYNAMIC MODELS ########################################### """ Dynamic Models can be accessed by """ ########################### DYNAMIC DNN ################################# def dnn_dynamic(num_classes,input_dim,acts,**kwargs): input_neurons = kwargs['kwargs'].get('input_neurons',200) drops = kwargs['kwargs'].get('drops',[]) dnn_layers = kwargs['kwargs'].get('dnn_layers',1) last_act = kwargs['kwargs'].get('last_act','softmax') end_dense = kwargs['kwargs'].get('end_dense',{}) if not np.all([len(acts)==dnn_layers]): print "Layers Mismatch" return False x = Input(shape=(input_dim,),name='inpx') inpx = x for i in range(dnn_layers): x = Dense(input_neurons,activation=acts[i])(inpx) if drops != []: x = Dropout(drops[i])(x) if end_dense != {}: x = Dense(end_dense['input_neurons'], activation=end_dense['activation'],name='wrap')(x) try: x = Dropout(end_dense['dropout'])(x) except: pass score = Dense(num_classes,activation=last_act,name='score')(x) model = Model(inpx,score) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) return model ########################### DYNAMIC CNN ################################# def cnn_dynamic(num_classes,dimx,dimy,acts,**kwargs): cnn_layers = kwargs['kwargs'].get('cnn_layers',1) nb_filter = kwargs['kwargs'].get('nb_filter',[]) filter_length = kwargs['kwargs'].get('filter_length',[]) pools = kwargs['kwargs'].get('pools',[]) drops = kwargs['kwargs'].get('drops',[]) bn = kwargs['kwargs'].get('batch_norm',False) end_dense = 
kwargs['kwargs'].get('end_dense',{}) last_act = kwargs['kwargs'].get('last_act','softmax') if not np.all([len(acts)==cnn_layers,len(nb_filter)==cnn_layers,len(filter_length)==cnn_layers]): raise Exception("Layers Mismatch") x = Input(shape=(1,dimx,dimy),name='inpx') inpx = x for i in range(cnn_layers): x = Conv2D(filters=nb_filter[i], kernel_size=filter_length[i], data_format='channels_first', padding='same', activation=acts[i])(x) if bn: x=BatchNormalization()(x) if pools != []: if pools[i][0]=='max': x = MaxPooling2D(pool_size=pools[i][1])(x) elif pools[i][0]=='avg': x = AveragePooling2D(pool_size=pools[i][1])(x) elif pools[i][0]=='globmax': x = GlobalMaxPooling2D()(x) elif pools[i][0]=='globavg': x = GlobalAveragePooling2D()(x) if drops != []: x = Dropout(drops[i])(x) if pools[-1][0]=='max' or pools[-1][0]=='avg': x = Flatten()(x) if end_dense != {}: x = Dense(end_dense['input_neurons'], activation=end_dense['activation'],name='wrap')(x) try: x = Dropout(end_dense['dropout'])(x) except: pass score = Dense(num_classes,activation=last_act,name='score')(x) model = Model(inpx,score) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) return model ############################# DYNAMIC CBRNN ############################# def cbrnn_dynamic(num_classes,dimx,dimy,acts,**kwargs): """ """ pools = kwargs['kwargs'].get('pools',[]) drops = kwargs['kwargs'].get('drops',[]) bn = kwargs['kwargs'].get('batch_norm',False) end_dense = kwargs['kwargs'].get('end_dense',{}) last_act = kwargs['kwargs'].get('last_act','softmax') cnn_layers = kwargs['kwargs'].get('cnn_layers',1) rnn_layers = kwargs['kwargs'].get('rnn_layers',1) rnn_type = kwargs['kwargs'].get('rnn_type','LSTM') rnn_units = kwargs['kwargs'].get('rnn_units',[]) nb_filter = kwargs['kwargs'].get('nb_filter',[]) filter_length = kwargs['kwargs'].get('filter_length',[]) #CNN with biderectional lstm print "CBRNN" if not np.all([len(acts)==cnn_layers,len(nb_filter)==cnn_layers,len(filter_length)==cnn_layers]): print "Layers Mismatch" return False x = Input(shape=(1,dimx,dimy),name='inpx') inpx = x for i in range(cnn_layers): x = Conv2D(filters=nb_filter[i], kernel_size=filter_length[i], data_format='channels_first', padding='same', activation=acts[i])(x) if bn: x=BatchNormalization()(x) if pools != []: if pools[i][0]=='max': x = MaxPooling2D(pool_size=pools[i][1])(x) elif pools[i][0]=='avg': x = AveragePooling2D(pool_size=pools[i][1])(x) if drops != []: x = Dropout(drops[i])(x) x = Permute((2,1,3))(x) a,b,c,d= kr(x) x = Reshape((b*d,c))(x) for i in range(rnn_layers): #Only last layer can have return_sequences as False r = False if i == rnn_layers-1 else True if rnn_type=='LSTM': x = LSTM(rnn_units[i],return_sequences=r)(x) elif rnn_type=='GRU': x = Bidirectional(GRU(rnn_units[i],return_sequences=r))(x) elif rnn_type=='bdLSTM': x = Bidirectional(LSTM(rnn_units[i],return_sequences=r))(x) elif rnn_type=='bdGRU': x = Bidirectional(GRU(rnn_units[i],return_sequences=r))(x) x= Dropout(0.1)(x) if end_dense != {}: x = Dense(end_dense['input_neurons'], activation=end_dense['activation'],name='wrap')(x) try: x = Dropout(end_dense['dropout'])(x) except: pass main_output = Dense(num_classes, activation=last_act, name='main_output')(x) model = Model(inputs=inpx, outputs=main_output) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model
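For context, a minimal sketch of how these dynamic builders are meant to be driven: each one reads its hyper-parameters from a dict passed as the keyword argument `kwargs` (hence the `kwargs['kwargs']` lookups above). The dimensions and settings below are illustrative placeholders, not values taken from this repository.

# Hypothetical driver for cnn_dynamic; all values are made-up examples.
params = {
    'cnn_layers': 2,
    'nb_filter': [64, 64],                       # one entry per conv layer
    'filter_length': [3, 3],
    'pools': [('max', (2, 2)), ('max', (2, 2))], # (kind, pool_size) per layer
    'drops': [0.1, 0.1],
    'batch_norm': True,
    'end_dense': {'input_neurons': 128, 'activation': 'relu', 'dropout': 0.2},
    'last_act': 'softmax',
}
# acts must have exactly cnn_layers entries, or the builder raises "Layers Mismatch".
model = cnn_dynamic(num_classes=10, dimx=40, dimy=500, acts=['relu', 'relu'], kwargs=params)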
36.017197
124
0.568635
4,787
41,888
4.838939
0.072488
0.076153
0.095191
0.029788
0.816396
0.786306
0.75695
0.718831
0.669833
0.642592
0
0.023007
0.244581
41,888
1,162
125
36.048193
0.709035
0.039725
0
0.721127
0
0
0.136302
0.007922
0
0
0
0
0
0
null
null
0.004225
0.012676
null
null
0.073239
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
5c84ebe36aebe29f6fd6426b496803ebfd2ecf4a
165
py
Python
oozer/common/cold_storage/__init__.py
panoramichq/data-collection-fb
550b90a303c880ae8c3dfd2801dc4f991a969f89
[ "MIT" ]
null
null
null
oozer/common/cold_storage/__init__.py
panoramichq/data-collection-fb
550b90a303c880ae8c3dfd2801dc4f991a969f89
[ "MIT" ]
null
null
null
oozer/common/cold_storage/__init__.py
panoramichq/data-collection-fb
550b90a303c880ae8c3dfd2801dc4f991a969f89
[ "MIT" ]
null
null
null
# flake8: noqa: F401
from oozer.common.cold_storage.base_store import store  # for backward compat
from oozer.common.cold_storage.batch_store import ChunkDumpStore
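A quick illustration of what these re-exports buy downstream code; the import lines above are the only firm facts here, and the call signatures of `store` and `ChunkDumpStore` live in the re-exported modules, not in this file.

# Hypothetical consumer: both names resolve at the package root thanks to
# the re-exports in __init__.py, so legacy call sites keep working unchanged.
from oozer.common.cold_storage import store, ChunkDumpStore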
33
77
0.830303
24
165
5.541667
0.666667
0.135338
0.225564
0.285714
0.390977
0
0
0
0
0
0
0.027211
0.109091
165
4
78
41.25
0.877551
0.230303
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
5cc3664c82996ad2b72e918e28bfde957c8159e7
22,055
py
Python
backend/api/tests/api/test_document.py
vtmoreau/doccano
f0b94c3e56a917d8b2ee6d5d3efd6b900a6eaacb
[ "MIT" ]
null
null
null
backend/api/tests/api/test_document.py
vtmoreau/doccano
f0b94c3e56a917d8b2ee6d5d3efd6b900a6eaacb
[ "MIT" ]
null
null
null
backend/api/tests/api/test_document.py
vtmoreau/doccano
f0b94c3e56a917d8b2ee6d5d3efd6b900a6eaacb
[ "MIT" ]
null
null
null
from django.conf import settings
from django.contrib.auth.models import User
from model_mommy import mommy
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase

from ...models import Document
from .utils import (TestUtilsMixin, assign_user_to_role, create_default_roles,
                    remove_all_role_mappings)


class TestDocumentListAPI(APITestCase, TestUtilsMixin):

    @classmethod
    def setUpTestData(cls):
        cls.project_member_name = 'project_member_name'
        cls.project_member_pass = 'project_member_pass'
        cls.non_project_member_name = 'non_project_member_name'
        cls.non_project_member_pass = 'non_project_member_pass'
        cls.super_user_name = 'super_user_name'
        cls.super_user_pass = 'super_user_pass'
        create_default_roles()
        project_member = User.objects.create_user(username=cls.project_member_name,
                                                  password=cls.project_member_pass)
        non_project_member = User.objects.create_user(username=cls.non_project_member_name,
                                                      password=cls.non_project_member_pass)
        super_user = User.objects.create_superuser(username=cls.super_user_name,
                                                   password=cls.super_user_pass,
                                                   email='fizz@buzz.com')
        cls.main_project = mommy.make('TextClassificationProject',
                                      users=[project_member, super_user])
        doc1 = mommy.make('Document', project=cls.main_project)
        doc2 = mommy.make('Document', project=cls.main_project)
        mommy.make('Document', project=cls.main_project)
        cls.random_order_project = mommy.make('TextClassificationProject',
                                              users=[project_member, super_user],
                                              randomize_document_order=True)
        mommy.make('Document', 100, project=cls.random_order_project)
        sub_project = mommy.make('TextClassificationProject', users=[non_project_member])
        mommy.make('Document', project=sub_project)
        cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
        cls.random_order_project_url = reverse(viewname='doc_list',
                                               args=[cls.random_order_project.id])
        cls.data = {'text': 'example'}
        assign_user_to_role(project_member=project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=project_member, project=cls.random_order_project,
                            role_name=settings.ROLE_ANNOTATOR)
        mommy.make('DocumentAnnotation', document=doc1, user=project_member)
        mommy.make('DocumentAnnotation', document=doc2, user=project_member)

    def _test_list(self, url, username, password, expected_num_results):
        self.client.login(username=username, password=password)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json().get('results')), expected_num_results)

    def test_returns_docs_to_project_member(self):
        self._test_list(self.url,
                        username=self.project_member_name,
                        password=self.project_member_pass,
                        expected_num_results=3)

    def test_returns_docs_to_project_member_filtered_to_active(self):
        self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
                        username=self.project_member_name,
                        password=self.project_member_pass,
                        expected_num_results=1)

    def test_returns_docs_to_project_member_filtered_to_completed(self):
        self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
                        username=self.project_member_name,
                        password=self.project_member_pass,
                        expected_num_results=2)

    def test_returns_docs_to_project_member_filtered_to_active_with_collaborative_annotation(self):
        self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
                        username=self.super_user_name,
                        password=self.super_user_pass,
                        expected_num_results=3)
        self._patch_project(self.main_project, 'collaborative_annotation', True)
        self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
                        username=self.super_user_name,
                        password=self.super_user_pass,
                        expected_num_results=1)

    def test_returns_docs_to_project_member_filtered_to_completed_with_collaborative_annotation(self):
        self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
                        username=self.super_user_name,
                        password=self.super_user_pass,
                        expected_num_results=0)
        self._patch_project(self.main_project, 'collaborative_annotation', True)
        self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
                        username=self.super_user_name,
                        password=self.super_user_pass,
                        expected_num_results=2)

    def test_returns_docs_in_consistent_order_for_all_users(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        user1_documents = self.client.get(self.url, format='json').json().get('results')
        self.client.logout()
        self.client.login(username=self.super_user_name, password=self.super_user_pass)
        user2_documents = self.client.get(self.url, format='json').json().get('results')
        self.client.logout()
        self.assertEqual([doc['id'] for doc in user1_documents],
                         [doc['id'] for doc in user2_documents])

    def test_can_return_docs_in_consistent_random_order(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        user1_documents1 = self.client.get(self.random_order_project_url, format='json').json().get('results')
        user1_documents2 = self.client.get(self.random_order_project_url, format='json').json().get('results')
        self.client.logout()
        self.assertEqual(user1_documents1, user1_documents2)

        self.client.login(username=self.super_user_name, password=self.super_user_pass)
        user2_documents1 = self.client.get(self.random_order_project_url, format='json').json().get('results')
        user2_documents2 = self.client.get(self.random_order_project_url, format='json').json().get('results')
        self.client.logout()
        self.assertEqual(user2_documents1, user2_documents2)

        self.assertNotEqual(user1_documents1, user2_documents1)
        self.assertNotEqual(user1_documents2, user2_documents2)

    def test_do_not_return_docs_to_non_project_member(self):
        self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_do_not_return_docs_of_other_projects(self):
        self._test_list(self.url,
                        username=self.project_member_name,
                        password=self.project_member_pass,
                        expected_num_results=self.main_project.documents.count())

    def test_allows_superuser_to_create_doc(self):
        self.client.login(username=self.super_user_name, password=self.super_user_pass)
        response = self.client.post(self.url, format='json', data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_disallows_project_member_to_create_doc(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.post(self.url, format='json', data=self.data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()


class TestDocumentDetailAPI(APITestCase):

    @classmethod
    def setUpTestData(cls):
        cls.project_member_name = 'project_member_name'
        cls.project_member_pass = 'project_member_pass'
        cls.non_project_member_name = 'non_project_member_name'
        cls.non_project_member_pass = 'non_project_member_pass'
        cls.super_user_name = 'super_user_name'
        cls.super_user_pass = 'super_user_pass'
        create_default_roles()
        project_member = User.objects.create_user(username=cls.project_member_name,
                                                  password=cls.project_member_pass)
        non_project_member = User.objects.create_user(username=cls.non_project_member_name,
                                                      password=cls.non_project_member_pass)
        # Todo: change super_user to project_admin.
        super_user = User.objects.create_superuser(username=cls.super_user_name,
                                                   password=cls.super_user_pass,
                                                   email='fizz@buzz.com')
        project = mommy.make('TextClassificationProject', users=[project_member, super_user])
        cls.doc = mommy.make('Document', project=project)
        cls.url = reverse(viewname='doc_detail', args=[project.id, cls.doc.id])
        cls.data = {'text': 'example'}
        assign_user_to_role(project_member=project_member, project=project,
                            role_name=settings.ROLE_ANNOTATOR)

    def test_returns_doc_to_project_member(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.data['id'], self.doc.id)

    def test_do_not_return_doc_to_non_project_member(self):
        self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_allows_superuser_to_update_doc(self):
        self.client.login(username=self.super_user_name, password=self.super_user_pass)
        response = self.client.patch(self.url, format='json', data=self.data)
        self.assertEqual(response.data['text'], self.data['text'])

    def test_disallows_project_member_to_update_doc(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.patch(self.url, format='json', data=self.data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_allows_superuser_to_delete_doc(self):
        self.client.login(username=self.super_user_name, password=self.super_user_pass)
        response = self.client.delete(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_disallows_project_member_to_delete_doc(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.delete(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()


class TestApproveLabelsAPI(APITestCase):

    @classmethod
    def setUpTestData(cls):
        cls.annotator_name = 'annotator_name'
        cls.annotator_pass = 'annotator_pass'
        cls.approver_name = 'approver_name_name'
        cls.approver_pass = 'approver_pass'
        cls.project_admin_name = 'project_admin_name'
        cls.project_admin_pass = 'project_admin_pass'
        create_default_roles()
        annotator = User.objects.create_user(username=cls.annotator_name,
                                             password=cls.annotator_pass)
        approver = User.objects.create_user(username=cls.approver_name,
                                            password=cls.approver_pass)
        project_admin = User.objects.create_user(username=cls.project_admin_name,
                                                 password=cls.project_admin_pass)
        project = mommy.make('TextClassificationProject',
                             users=[annotator, approver, project_admin])
        cls.doc = mommy.make('Document', project=project)
        cls.url = reverse(viewname='approve_labels', args=[project.id, cls.doc.id])
        assign_user_to_role(project_member=annotator, project=project,
                            role_name=settings.ROLE_ANNOTATOR)
        assign_user_to_role(project_member=approver, project=project,
                            role_name=settings.ROLE_ANNOTATION_APPROVER)
        assign_user_to_role(project_member=project_admin, project=project,
                            role_name=settings.ROLE_PROJECT_ADMIN)

    def test_allow_project_admin_to_approve_and_disapprove_labels(self):
        self.client.login(username=self.project_admin_name, password=self.project_admin_pass)
        response = self.client.post(self.url, format='json', data={'approved': True})
        self.assertEqual(response.data['annotation_approver'], self.project_admin_name)
        response = self.client.post(self.url, format='json', data={'approved': False})
        self.assertIsNone(response.data['annotation_approver'])

    def test_allow_approver_to_approve_and_disapprove_labels(self):
        self.client.login(username=self.approver_name, password=self.approver_pass)
        response = self.client.post(self.url, format='json', data={'approved': True})
        self.assertEqual(response.data['annotation_approver'], self.approver_name)
        response = self.client.post(self.url, format='json', data={'approved': False})
        self.assertIsNone(response.data['annotation_approver'])

    def test_disallows_non_annotation_approver_to_approve_and_disapprove_labels(self):
        self.client.login(username=self.annotator_name, password=self.annotator_pass)
        response = self.client.post(self.url, format='json', data={'approved': True})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()


class TestSearch(APITestCase):

    @classmethod
    def setUpTestData(cls):
        cls.project_member_name = 'project_member_name'
        cls.project_member_pass = 'project_member_pass'
        cls.non_project_member_name = 'non_project_member_name'
        cls.non_project_member_pass = 'non_project_member_pass'
        create_default_roles()
        project_member = User.objects.create_user(username=cls.project_member_name,
                                                  password=cls.project_member_pass)
        non_project_member = User.objects.create_user(username=cls.non_project_member_name,
                                                      password=cls.non_project_member_pass)
        cls.main_project = mommy.make('TextClassificationProject', users=[project_member])
        cls.search_term = 'example'
        doc1 = mommy.make('Document', text=cls.search_term, project=cls.main_project)
        doc2 = mommy.make('Document', text='Lorem', project=cls.main_project)
        label1 = mommy.make('Label', project=cls.main_project)
        label2 = mommy.make('Label', project=cls.main_project)
        mommy.make('SequenceAnnotation', document=doc1, user=project_member, label=label1)
        mommy.make('SequenceAnnotation', document=doc2, user=project_member, label=label2)
        sub_project = mommy.make('TextClassificationProject', users=[non_project_member])
        mommy.make('Document', text=cls.search_term, project=sub_project)
        cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
        cls.data = {'q': cls.search_term}
        assign_user_to_role(project_member=project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)

    def test_can_filter_doc_by_term(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=self.data)
        count = Document.objects.filter(text__contains=self.search_term,
                                        project=self.main_project).count()
        self.assertEqual(response.data['count'], count)

    def test_can_order_doc_by_created_at_ascending(self):
        params = {'ordering': 'created_at'}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project).order_by('created_at').values()
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    def test_can_order_doc_by_created_at_descending(self):
        params = {'ordering': '-created_at'}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project).order_by('-created_at').values()
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    def test_can_order_doc_by_annotation_updated_at_ascending(self):
        params = {'ordering': 'seq_annotations__updated_at'}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project).order_by('seq_annotations__updated_at').values()
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    def test_can_order_doc_by_annotation_updated_at_descending(self):
        params = {'ordering': '-seq_annotations__updated_at'}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project).order_by('-seq_annotations__updated_at').values()
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()


class TestFilter(APITestCase):

    @classmethod
    def setUpTestData(cls):
        cls.project_member_name = 'project_member_name'
        cls.project_member_pass = 'project_member_pass'
        create_default_roles()
        project_member = User.objects.create_user(username=cls.project_member_name,
                                                  password=cls.project_member_pass)
        cls.main_project = mommy.make('SequenceLabelingProject', users=[project_member])
        cls.label1 = mommy.make('Label', project=cls.main_project)
        cls.label2 = mommy.make('Label', project=cls.main_project)
        doc1 = mommy.make('Document', project=cls.main_project)
        doc2 = mommy.make('Document', project=cls.main_project)
        mommy.make('Document', project=cls.main_project)
        mommy.make('SequenceAnnotation', document=doc1, user=project_member, label=cls.label1)
        mommy.make('SequenceAnnotation', document=doc2, user=project_member, label=cls.label2)
        cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
        cls.params = {'seq_annotations__label__id': cls.label1.id}
        assign_user_to_role(project_member=project_member, project=cls.main_project,
                            role_name=settings.ROLE_ANNOTATOR)

    def test_can_filter_by_label(self):
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=self.params)
        docs = Document.objects.filter(project=self.main_project,
                                       seq_annotations__label__id=self.label1.id).values()
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    def test_can_filter_doc_with_annotation(self):
        params = {'seq_annotations__isnull': False}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project,
                                       seq_annotations__isnull=False).values()
        self.assertEqual(response.data['count'], docs.count())
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    def test_can_filter_doc_without_annotation(self):
        params = {'seq_annotations__isnull': True}
        self.client.login(username=self.project_member_name, password=self.project_member_pass)
        response = self.client.get(self.url, format='json', data=params)
        docs = Document.objects.filter(project=self.main_project,
                                       seq_annotations__isnull=True).values()
        self.assertEqual(response.data['count'], docs.count())
        for d1, d2 in zip(response.data['results'], docs):
            self.assertEqual(d1['id'], d2['id'])

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()
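The collaborative-annotation tests above rely on a `_patch_project` helper that `TestUtilsMixin` contributes from the `.utils` module, which is not part of this file. A plausible minimal sketch of such a helper (an assumption for illustration, not doccano's actual implementation) would be:

class TestUtilsMixin:
    def _patch_project(self, project, attribute, value):
        # Hypothetical sketch: flip one project flag and persist it so the
        # view under test sees the new setting on the next request.
        setattr(project, attribute, value)
        project.save()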
53.016827
115
0.672909
2,614
22,055
5.355011
0.06733
0.117017
0.049793
0.048221
0.858123
0.825046
0.798114
0.784469
0.759251
0.734534
0
0.006735
0.225754
22,055
415
116
53.144578
0.813012
0.001859
0
0.608696
0
0
0.085045
0.034299
0
0
0
0.00241
0.089855
1
0.113043
false
0.168116
0.023188
0
0.150725
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
7a3a373d8e90f4ce7810b775f353120b7fe9db04
97,937
py
Python
misc-cogs.py
RobertoDraws/TriviaNationBot
0ad4ce9ab35fde0d9fe475ea871bca8e21416963
[ "MIT" ]
1
2020-12-18T20:32:13.000Z
2020-12-18T20:32:13.000Z
misc-cogs.py
RobertoDraws/TriviaNationBot
0ad4ce9ab35fde0d9fe475ea871bca8e21416963
[ "MIT" ]
null
null
null
misc-cogs.py
RobertoDraws/TriviaNationBot
0ad4ce9ab35fde0d9fe475ea871bca8e21416963
[ "MIT" ]
1
2019-04-04T12:09:39.000Z
2019-04-04T12:09:39.000Z
#Included: doge, warm, cookie, eightball, coinflip, farm, sport/play
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from discord.utils import find
import asyncio
import logging
import os
import time
import requests
import random
from time import localtime, strftime
from datetime import datetime
import subprocess
import colorama
from urllib.parse import quote
from itertools import cycle

spin_logo = "https://cdn.discordapp.com/attachments/411306798042054657/471067301408538635/Trivia_Nation_Logo_HQ.jpg"
red = "0xff2f2f"
green = "0xbfea15"

class misccogs:
    def __init__(self, bot):
        self.bot = bot

    #█▀▀▄ █▀▀█ █▀▀▀ █▀▀
    #█░░█ █░░█ █░▀█ █▀▀
    #▀▀▀░ ▀▀▀▀ ▀▀▀▀ ▀▀▀
    @commands.command(pass_context = True)
    async def doge(self, ctx):
        url = 'https://dog.ceo/api/breeds/image/random'
        r = requests.get(url)
        data = r.json()
        embed = discord.Embed(title=":dog: Woof!", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_image(url=data["message"])
        embed.set_footer(text='+doge', icon_url=spin_logo)
        await self.bot.say(embed=embed)
        print ("| {0} has used the doge command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +doge', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url=data["message"])
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)

    #█░░░█ █▀▀█ █▀▀█ █▀▄▀█
    #█▄█▄█ █▄▄█ █▄▄▀ █░▀░█
    #░▀░▀░ ▀░░▀ ▀░▀▀ ▀░░░▀
    @commands.command(pass_context=True)
    async def warm(self, ctx, user: discord.Member):
        embed = discord.Embed(title=":sun_with_face: **{0}** has been warmed by **{1}** :heart:.".format(user.name, ctx.message.author.name), color=0xeca762, timestamp=datetime.utcnow())
        embed.set_footer(text='+warm', icon_url=spin_logo)
        await self.bot.say(embed=embed)
        print ("| {0} has used the warm command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +warm {}'.format(user.name), icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url=ctx.message.author.avatar_url)
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)

    @warm.error
    async def warm_error(self, error, ctx):
        print ("| {0} has used the warm command with an error in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Invalid syntax: +warm', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url=ctx.message.author.avatar_url)
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description="This command makes a member warm :wink:", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="Invalid Syntax".format(user.name))
        embed.add_field(name="Usage", value="+warm <member>")
        embed.set_footer(text='+warm', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(5)
        await self.bot.delete_message(ctx.message)
        await self.bot.delete_message(botmessage1)

    #█▀▀ █▀▀█ █▀▀█ █░█ ░▀░ █▀▀
    #█░░ █░░█ █░░█ █▀▄ ▀█▀ █▀▀
    #▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀░▀ ▀▀▀ ▀▀▀
    @commands.command(pass_context=True)
    async def cookie(self, ctx, user: discord.Member):
        embed = discord.Embed(title="**{0}** has given a :cookie: with :heart: to **{1}**.".format(ctx.message.author.name, user.name), color=0xeca762, timestamp=datetime.utcnow())
        embed.set_footer(text='+cookie', icon_url=spin_logo)
        await self.bot.say(embed=embed)
        print ("| {0} has used the cookie command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +cookie {}'.format(user.name), icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url=ctx.message.author.avatar_url)
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)

    @cookie.error
    async def cookie_error(self, error, ctx):
        # (the source named this parameter "ctIx" while the body used "ctx")
        print ("| {0} has used the cookie command with an error in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Invalid syntax: +cookie', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url=ctx.message.author.avatar_url)
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description="This command gives a member a :cookie:.", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="Invalid Syntax".format(user.name))
        embed.add_field(name="Usage", value="+cookie <member>")
        embed.set_footer(text='+cookie', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(5)
        await self.bot.delete_message(ctx.message)
        await self.bot.delete_message(botmessage1)

    #█▀▀ ░▀░ █▀▀▀ █░░█ ▀▀█▀▀ █▀▀▄ █▀▀█ █░░ █░░
    #█▀▀ ▀█▀ █░▀█ █▀▀█ ░░█░░ █▀▀▄ █▄▄█ █░░ █░░
    #▀▀▀ ▀▀▀ ▀▀▀▀ ▀░░▀ ░░▀░░ ▀▀▀░ ▀░░▀ ▀▀▀ ▀▀▀
    @commands.command(pass_context=True, aliases=['8ball'])
    @commands.cooldown(1, 20, commands.BucketType.user)
    async def eightball(self, ctx, *, mesg):
        print ("| {0} has used the eightball command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +eightball', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://thumbs.gfycat.com/KeenVerifiableCavy-max-1mb.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="Randomly selecting answer, give me a few seconds...", color=0xffffff)
        embed.set_author(name="Magic 8 Ball", icon_url="https://thumbs.gfycat.com/KeenVerifiableCavy-max-1mb.gif")
        botmessage = await self.bot.say(embed=embed)
        await asyncio.sleep(5)
        await self.bot.delete_message(botmessage)
        possible_responses = [
            ':white_check_mark: It is certain.', ':white_check_mark: It is decidedly so.',
            ':white_check_mark: Without a doubt.', ':white_check_mark: Yes - definitely.',
            ':white_check_mark: You may rely on it.', ':white_check_mark: As I see it, yes.',
            ':white_check_mark: Most likely.', ':white_check_mark: Outlook good.',
            ':white_check_mark: Yes.', ':white_check_mark: Signs point to yes.',
            ':warning: Reply hazy, try again', ':warning: Ask again later.',
            ':warning: Better not tell you now.', ':warning: Cannot predict now.',
            ':warning: Concentrate and ask again.', ":x: Don't count on it.",
            ':x: My reply is no.', ':x: My sources say no.',
            ':x: Outlook not so good.', ':x: Very doubtful.',
        ]
        answer = (random.choice(possible_responses))
        embed = discord.Embed(title="And the answer is...", color=0xffffff, timestamp=datetime.utcnow())
        embed.set_author(name="Magic 8 Ball", icon_url="https://thumbs.gfycat.com/KeenVerifiableCavy-max-1mb.gif")
        embed.add_field(name="Question", value='{}'.format(mesg), inline=False)
        embed.add_field(name="Answer", value='{}'.format(answer), inline=False)
        embed.set_footer(text='+eightball', icon_url=spin_logo)
        await self.bot.say(embed=embed)

    @eightball.error
    async def eightball_error(self, error, ctx):
        print ("| {0} has used the eightball command on cooldown in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Cooldown: +eightball', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://thumbs.gfycat.com/KeenVerifiableCavy-max-1mb.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description=f'**You are on cooldown or you have not asked a question :alarm_clock:.** Try again in {str(round(error.retry_after, 2))} seconds!\n*This message will be deleted once you can use the* `+eightball` *command again.*', color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="8Ball Cooldown".format(user.name))
        embed.set_thumbnail(url="https://i.imgur.com/zEbHiZs.gif")
        embed.set_footer(text='+eightball', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(error.retry_after)
        await self.bot.delete_message(botmessage1)

    #█▀▀ █▀▀█ ░▀░ █▀▀▄ █▀▀ █░░ ░▀░ █▀▀█
    #█░░ █░░█ ▀█▀ █░░█ █▀▀ █░░ ▀█▀ █░░█
    #▀▀▀ ▀▀▀▀ ▀▀▀ ▀░░▀ ▀░░ ▀▀▀ ▀▀▀ █▀▀▀
    @commands.command(pass_context=True, aliases=['coin', 'flip'])
    @commands.cooldown(1, 20, commands.BucketType.user)
    async def coinflip(self, ctx, *, choices):
        print ("| {0} has used the coinflip command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +coinflip', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://i.imgur.com/GsrBafV.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="Flipping, give me a few seconds...", color=0xffffff)
        embed.set_author(name="Coin Flip", icon_url="https://i.imgur.com/GsrBafV.gif")
        botmessage = await self.bot.say(embed=embed)
        await asyncio.sleep(5)
        await self.bot.delete_message(botmessage)
        answer = random.choice(list(map(str.strip, choices.split('|'))))
        embed = discord.Embed(title="And the answer is...", color=0xffffff, timestamp=datetime.utcnow())
        embed.set_author(name="Coin Flip", icon_url="https://i.imgur.com/GsrBafV.gif")
        embed.add_field(name="Options", value='{}'.format(choices), inline=False)
        embed.add_field(name="Answer", value='{}'.format(answer), inline=False)
        embed.set_footer(text='+coinflip', icon_url=spin_logo)
        await self.bot.say(embed=embed)

    @coinflip.error
    async def coinflip_error(self, error, ctx):
        print ("| {0} has used the coinflip command on cooldown in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Cooldown: +coinflip', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://i.imgur.com/GsrBafV.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description=f'**You are on cooldown or you have not put in two arguments :alarm_clock:.**Try again in {str(round(error.retry_after, 2))} seconds!\n*This message will be deleted once you can use the* `+coinflip` *command again.*', color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="Coin Flip Cooldown".format(user.name))
        embed.set_thumbnail(url="https://i.imgur.com/zEbHiZs.gif")
        embed.set_footer(text='+coinflip', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(error.retry_after)
        await self.bot.delete_message(botmessage1)

    #█▀▀ █▀▀█ █▀▀█ █▀▄▀█
    #█▀▀ █▄▄█ █▄▄▀ █░▀░█
    #▀░░ ▀░░▀ ▀░▀▀ ▀░░░▀
    @commands.command(pass_context=True, aliases=['farmy'])
    @commands.cooldown(1, 30, commands.BucketType.user)
    async def farm(self, ctx):
        print ("| {0} has used the farm command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +farm', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://i.imgur.com/g12DuwU.png')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="Farming, give me a few seconds...", color=0xffffff)
        embed.set_author(name="Farmy", icon_url="https://media0.giphy.com/media/l378lr9mW23l1Tw4g/giphy.gif")
        botmessage = await self.bot.say(embed=embed)
        await asyncio.sleep(3)
        await self.bot.delete_message(botmessage)
        possible_responses = [
            ':green_apple:', ':apple:', ':pear:', ':tangerine:', ':lemon:', ':banana:',
            ':watermelon:', ':grapes:', ':strawberry:', ':melon:', ':cherries:', ':peach:',
            ':pineapple:', ':tomato:', ':eggplant:', ':hot_pepper:', ':corn:', ':sweet_potato:',
            ':honey_pot:', ':bread:', ':cheese:', ':bacon:', ':potato:', ':carrot:',
            ':french_bread:', ':kiwi:', ':peanuts:', ':milk:', ':chocolate_bar:', ':burrito:',
            ':chicken:', ':poultry_leg:', ':meat_on_bone:', ':fried_shrimp:', ':cooking:',
            ':hamburger:', ':fries:', ':hotdog:', ':pizza:', ':spaghetti:', ':taco:', ':ramen:',
            ':stew:', ':fish_cake:', ':sushi:', ':bento:', ':curry:', ':rice_ball:', ':rice:',
            ':rice_cracker:', ':oden:', ':dango:', ':shaved_ice:', ':ice_cream:', ':icecream:',
            ':cake:', ':birthday:', ':custard:', ':candy:', ':lollipop:', ':popcorn:',
            ':doughnut:', ':cookie:', ':beer:', ':beers:', ':wine_glass:', ':cocktail:',
            ':tropical_drink:', ':salad:', ':shallow_pan_of_food:', ':stuffed_flatbread:',
            ':champagne_glass:', ':tumbler_glass:', ':spoon:', ':egg:', ':pancakes:', ':poop:',
        ]
        item = (random.choice(possible_responses))
        embed = discord.Embed(title=":tractor: | {0}, you farmed: {1}!".format(ctx.message.author.name, item), color=0xffffff, timestamp=datetime.utcnow())
        embed.set_author(name="Farmy", icon_url="https://media0.giphy.com/media/l378lr9mW23l1Tw4g/giphy.gif")
        embed.set_thumbnail(url='https://i.imgur.com/g12DuwU.png')
        embed.set_footer(text='+farm (Idea by DevelUpGames#1185)', icon_url=spin_logo)
        await self.bot.say(embed=embed)

    @farm.error
    async def farm_error(self, error, ctx):
        print ("| {0} has used the farm command on cooldown in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Cooldown: +farm', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://i.imgur.com/GsrBafV.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description=f'**You are on cooldown! :alarm_clock:.**Try again in {str(round(error.retry_after, 2))} seconds!\n*This message will be deleted once you can use the* `+farm` *command again.*', color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="Farmy Cooldown".format(user.name))
        embed.set_thumbnail(url="https://i.imgur.com/zEbHiZs.gif")
        embed.set_footer(text='+farm', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(error.retry_after)
        await self.bot.delete_message(botmessage1)

    #█▀▀ █▀▀█ █▀▀█ █▀▀█ ▀▀█▀▀
    #▀▀█ █░░█ █░░█ █▄▄▀ ░░█░░
    #▀▀▀ █▀▀▀ ▀▀▀▀ ▀░▀▀ ░░▀░░
    @commands.command(pass_context=True, aliases=['play'])
    @commands.cooldown(1, 30, commands.BucketType.user)
    async def sport(self, ctx):
        print ("| {0} has used the sport command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +sport', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://cdn.dribbble.com/users/68398/screenshots/3333492/cyclist.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="Thinking, give me a few seconds...", color=0xffffff)
        embed.set_author(name="Sport", icon_url="https://i.imgur.com/Yb9gtzH.jpg")
        botmessage = await self.bot.say(embed=embed)
        await asyncio.sleep(3)
        await self.bot.delete_message(botmessage)
        possible_responses = [
            ':soccer:', ':basketball:', ':football:', ':baseball:', ':tennis:', ':volleyball:',
            ':rugby_football:', ':8ball:', ':ping_pong:', ':badminton:', ':goal:', ':hockey:',
            ':field_hockey:', ':cricket:', ':golf:', ':bow_and_arrow:', ':fishing_pole_and_fish:',
            ':boxing_glove:', ':martial_arts_uniform:', ':running_shirt_with_sash:', ':ice_skate:',
            ':ski:', ':skier:', ':snowboarder:', ':lifter:', ':wrestlers:', ':cartwheel:',
            ':person_doing_cartwheel:', ':basketball_player:', ':person_with_ball:',
            ':basketball_player::', ':person_with_ball:', ':fencer:', ':handball:', ':golfer:',
            ':horse_racing:', ':surfer:', ':swimmer:', ':water_polo:', ':rowboat:',
            ':mountain_bicyclist:', ':bicyclist:', ':circus_tent:', ':juggling:', ':juggler:‍',
            ':performing_arts:', ':art:', ':clapper:', ':microphone:', ':headphones:',
            ':musical_score:', ':musical_keyboard:', ':drum:', ':saxophone:', ':trumpet:',
            ':guitar:', ':violin:', ':game_die:', ':dart:', ':bowling:', ':video_game:',
            ':slot_machine:',
        ]
        sport = (random.choice(possible_responses))
        embed = discord.Embed(title=":trophy: | {0}, you will play/do: {1}!".format(ctx.message.author.name, sport), color=0xffffff, timestamp=datetime.utcnow())
        embed.set_author(name="Sports", icon_url="https://i.imgur.com/Yb9gtzH.jpg")
        embed.set_thumbnail(url='https://cdn.dribbble.com/users/68398/screenshots/3333492/cyclist.gif')
        embed.set_footer(text='+sport', icon_url=spin_logo)
        await self.bot.say(embed=embed)

    @sport.error
    async def sport_error(self, error, ctx):
        print ("| {0} has used the sport command on cooldown in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Cooldown: +sport', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://i.imgur.com/GsrBafV.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="", description=f'**You are on cooldown! :alarm_clock:.**Try again in {str(round(error.retry_after, 2))} seconds!\n*This message will be deleted once you can use the* `+sport` *command again.*', color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name="Sport Cooldown".format(user.name))
        embed.set_thumbnail(url="https://i.imgur.com/zEbHiZs.gif")
        embed.set_footer(text='+sport', icon_url=spin_logo)
        botmessage1 = await self.bot.say(embed=embed)
        await asyncio.sleep(error.retry_after)
        await self.bot.delete_message(botmessage1)

    @commands.command(pass_context=True)
    @commands.cooldown(1, 30, commands.BucketType.user)
    async def fish(self, ctx):
        print ("| {0} has used the fish command in {1}".format(ctx.message.author.name, ctx.message.server.name))
        print('|-----------------------------------------------------------------------------')
        user = discord.Member
        embed = discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow())
        embed.set_author(name='Command: +fish', icon_url=spin_logo)
        embed.add_field(name="Server", value="{}".format(ctx.message.server.name))
        embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention))
        embed.add_field(name="User", value="{}".format(ctx.message.author.mention))
        embed.set_thumbnail(url='https://media.giphy.com/media/Y4sT5ESJWdPGtTKqNd/giphy.gif')
        embed.set_footer(text="ID: {}".format(ctx.message.id))
        await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed)
        embed = discord.Embed(title="Fishing, give me a few seconds...", color=0xffffff)
        embed.set_author(name="Fishy", icon_url="https://media.giphy.com/media/Y4sT5ESJWdPGtTKqNd/giphy.gif")
        botmessage = await self.bot.say(embed=embed)
        await asyncio.sleep(3)
        await self.bot.delete_message(botmessage)
        # The same literal is repeated many times below so that common "junk"
        # catches dominate the uniform random.choice() draw.
        possible_responses = [
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart
            ':fishing_pole_and_fish: | **{}, you
caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: 
:shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: 
:shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :shopping_cart:!**'.format(ctx.message.author.name), #Shopping Cart ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: 
:paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper 
Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: 
:paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :paperclip:!**'.format(ctx.message.author.name), #Paper Clip ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you 
caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | 
**{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench 
':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :wrench:!**'.format(ctx.message.author.name), #Wrench ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: 
| **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: 
:mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :mans_shoe:!**'.format(ctx.message.author.name), #Shoes ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: 
:battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: 
:battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: 
:battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :battery:!**'.format(ctx.message.author.name), #Battery ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), 
#Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :fish:!**'.format(ctx.message.author.name), #Blue fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), 
#Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), 
#Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :tropical_fish:!**'.format(ctx.message.author.name), #Yellow fish ':fishing_pole_and_fish: | **{}, you caught: :whale:!**'.format(ctx.message.author.name), #Whale ':fishing_pole_and_fish: | **{}, you caught: :whale:!**'.format(ctx.message.author.name), #Whale ':fishing_pole_and_fish: | **{}, you caught: :whale:!**'.format(ctx.message.author.name), #Whale ':fishing_pole_and_fish: | **{}, you caught: :whale:!**'.format(ctx.message.author.name), #Whale ':fishing_pole_and_fish: | **{}, you caught: :penguin:!**'.format(ctx.message.author.name), #Penguin ':fishing_pole_and_fish: | **{}, you caught: :penguin:!**'.format(ctx.message.author.name), #Penguin ':fishing_pole_and_fish: | **{}, you caught: :penguin:!**'.format(ctx.message.author.name), #Penguin ':fishing_pole_and_fish: | **{}, you caught: :crown:!**'.format(ctx.message.author.name), #Crown ':fishing_pole_and_fish: | **{}, you caught: :crown:!**'.format(ctx.message.author.name), #Crown ':fishing_pole_and_fish: | **{}, you caught: :gem:!**'.format(ctx.message.author.name), #Gem ] fish = (random.choice(possible_responses)) embed=discord.Embed(title="{}".format(fish), color=0xffffff, timestamp=datetime.utcnow()) embed.set_author(name="Fishy", icon_url="https://media.giphy.com/media/Y4sT5ESJWdPGtTKqNd/giphy.gif") embed.set_thumbnail(url='https://media.giphy.com/media/Y4sT5ESJWdPGtTKqNd/giphy.gif') embed.set_footer(text='+fish', icon_url=spin_logo) await self.bot.say(embed=embed) @fish.error async def fish_error(self, error, ctx): print ("| {0} has used the fish command on cooldown in {1}".format(ctx.message.author.name, ctx.message.server.name)) print('|-----------------------------------------------------------------------------') user = discord.Member embed=discord.Embed(title="", color=0xbfea15, timestamp=datetime.utcnow()) embed.set_author(name='Cooldown: +fish', icon_url=spin_logo) embed.add_field(name="Server", value="{}".format(ctx.message.server.name)) embed.add_field(name="Channel", value="{}".format(ctx.message.channel.mention)) embed.add_field(name="User", value="{}".format(ctx.message.author.mention)) embed.set_thumbnail(url='https://media.giphy.com/media/Y4sT5ESJWdPGtTKqNd/giphy.gif') embed.set_footer(text="ID: {}".format(ctx.message.id)) await self.bot.send_message(discord.Object(id='436634502392053761'), embed=embed) embed=discord.Embed(title="", description=f'**You are on cooldown :alarm_clock:.** Try again in {str(round(error.retry_after, 2))} seconds!\n*This message will be deleted once you can use the* `+fish` *command again.*', color=0xbfea15, timestamp=datetime.utcnow()) embed.set_author(name="Fishy Cooldown".format(user.name)) embed.set_thumbnail(url="https://i.imgur.com/zEbHiZs.gif") embed.set_footer(text='+fish', icon_url=spin_logo) botmessage1 = await self.bot.say(embed=embed) await asyncio.sleep(error.retry_after) await self.bot.delete_message(botmessage1) def setup(bot): bot.add_cog(misccogs(bot))
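A minimal standalone sketch of the weighting trick the +fish command relies on: rather than materialising one list entry per "ticket" and calling random.choice() on it, random.choices() can draw directly from the distinct outcomes with per-outcome weights, giving the same distribution without building the big list. The CATCHES table and roll_catch helper are hypothetical names, and the weights mirror the approximate counts noted above.

```python
import random

# Hypothetical weight table; values approximate the duplicate counts in the
# original possible_responses list (rare items are exact).
CATCHES = {
    ':shopping_cart:': 100,
    ':paperclip:': 100,
    ':wrench:': 80,
    ':mans_shoe:': 80,
    ':battery:': 100,
    ':fish:': 50,
    ':tropical_fish:': 45,
    ':whale:': 4,
    ':penguin:': 3,
    ':crown:': 2,
    ':gem:': 1,
}

def roll_catch(rng=random):
    # One weighted draw -- equivalent in distribution to random.choice()
    # over a list that repeats each emoji CATCHES[emoji] times.
    (emoji,) = rng.choices(list(CATCHES), weights=list(CATCHES.values()), k=1)
    return emoji

if __name__ == '__main__':
    print(':fishing_pole_and_fish: | **you caught: {}!**'.format(roll_catch()))
```

Since random.choices() was added in Python 3.6, the duplicated-list form may simply predate it; on 3.6+ the weighted call avoids holding roughly 500 near-identical strings per invocation.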
84.501294
313
0.604205
11,491
97,937
4.989731
0.042294
0.116679
0.180547
0.230985
0.943562
0.938748
0.935801
0.931615
0.922424
0.909047
0
0.006943
0.192593
97,937
1,158
314
84.574266
0.712488
0.056138
0
0.73772
0
0.004634
0.463579
0.160073
0
0
0.003264
0
0
1
0.001854
false
0.007414
0.015755
0
0.018536
0.027804
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
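The fish cog above weights its outcomes by repeating list entries (five tropical fish, four whales, and so on) and then drawing uniformly. A minimal stand-alone sketch of the same draw using explicit weights instead of duplication; the names and the weight values here mirror the visible tail of the list and are illustrative, not taken verbatim from the original:

import random

# Weights mirroring the duplicated entries: 5x tropical_fish, 4x whale,
# 3x penguin, 2x crown, 1x gem (commoner catches precede these in the file).
catches = [':tropical_fish:', ':whale:', ':penguin:', ':crown:', ':gem:']
weights = [5, 4, 3, 2, 1]

def draw_catch(rng: random.Random = random) -> str:
    # random.choices returns a list of k samples; take the single element.
    return rng.choices(catches, weights=weights, k=1)[0]

print(draw_catch())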
7abea5cefb9ece4bdd5e00bd6f12b07053cf28a2
6,507
py
Python
token.py
Asjadd/Discord-brute-force-Public
a8e6e80dbb0cb7754707fc3890a15997de3f679c
[ "MIT" ]
1
2022-02-28T01:21:08.000Z
2022-02-28T01:21:08.000Z
token.py
Asjadd/Discord-brute-force-Public
a8e6e80dbb0cb7754707fc3890a15997de3f679c
[ "MIT" ]
null
null
null
token.py
Asjadd/Discord-brute-force-Public
a8e6e80dbb0cb7754707fc3890a15997de3f679c
[ "MIT" ]
null
null
null
from pytransform import pyarmor_runtime pyarmor_runtime() __pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x02\x06\x00\x00\x00\x00\x00\x18\x25\xb6\x95\x4d\x90\x91\x3d\x4d\x81\x54\x04\x70\xe6\x7e\x97\x8d\x00\x00\x00\x00\x00\x00\x00\x00\x8a\x50\x4a\xf3\xe8\x1f\xe3\xea\x10\xca\x5d\x60\xb1\xbd\x86\xae\x3a\xa2\xd6\xf6\x91\x52\xbc\x1a\x0f\x7e\xa9\x9a\xbf\x0a\x05\xb9\xd0\x72\x55\xc2\xe9\x75\xa2\x20\xde\x98\xff\x60\x58\x4f\xdf\x8e\x80\x9f\x21\x77\xba\xd3\xc2\x30\xab\xd8\x29\xf8\x25\x7b\xca\xb8\x85\xfe\x51\x75\xca\x06\xc5\x36\xd3\x2b\x35\x24\xc0\x58\x96\x38\x50\xfb\x7d\x97\x93\x7c\x84\x2c\x79\xd2\x40\x5c\x8e\x8a\xe1\x0f\xaf\xfb\x61\xa0\x28\xc8\x1c\xb4\x75\x39\x2f\x55\x8d\xed\x33\xa7\x33\x2a\x87\x2a\x56\xca\x03\x5d\x28\x80\xa7\x0c\x14\x68\x16\xbc\x00\xcf\xfe\xdf\xe7\xa7\x94\xd4\x34\xc7\x36\xb6\xcb\xb9\x1e\xaf\xff\xcb\x57\xe0\x67\x7d\x1b\x68\x18\xcc\xe7\xce\x5f\xcd\x5c\x6f\xd7\x93\xb8\x0f\xc2\x41\x4c\x09\x9b\xfa\x0b\x03\xd3\xc1\xbf\x21\xc9\x90\xc8\xd0\x66\x4b\x9c\xb7\xe4\x3a\x47\xe5\x81\x4c\x6f\x1b\xf0\xe3\x7b\x10\xb6\x4e\xe9\x14\x57\x12\xa6\x86\x6c\x4b\x20\x93\xdd\x73\xbe\xe9\xc7\x24\xed\x99\xef\xdc\xe9\x7b\x81\x73\xbb\xc4\xa5\x96\x48\x70\xdd\x70\x83\xac\xba\xc3\x0d\x89\xf9\x9b\x85\x10\x0b\x51\xd0\x3d\x1e\xd2\x58\x08\x68\xbc\x3b\x8c\x64\x55\xe2\x3e\x42\x21\xce\x7e\xc0\x94\xa8\xe9\x34\x9c\x14\x1e\x40\xc1\xbe\x9a\xc1\xc7\xf5\xd9\x4f\x9e\x6e\x3b\xff\xac\xa5\x3c\x64\xcb\xc1\x44\x62\x6b\xd0\xab\x04\x9c\x7c\xf8\x4f\x21\xf8\x6c\x48\xc7\x75\x76\xa6\xf6\x94\xd6\x6b\xde\x8c\x10\x40\x34\x04\x80\x95\xc0\x46\x84\x4d\xea\x56\x94\x0d\xb4\xc0\x07\x96\xab\x71\x75\x04\xf1\x67\xde\x6d\x30\xad\xe2\x01\xbe\x55\xb4\xf7\xe2\x34\xe5\xe9\xb3\xd2\xf6\xda\x5b\x59\xe8\x6d\xb1\x31\x02\x65\x3e\x34\x29\x5c\x21\x79\xf9\x83\x18\x67\x77\xa6\xe5\x38\xfa\xd9\x0c\x85\xb1\xcc\xdf\x99\xd2\x42\x02\x96\x26\x40\xee\x09\x7c\x08\x91\x33\xae\x28\x7c\x15\x96\x82\x8f\xfb\x03\x29\x82\x4d\x18\xf0\xde\x26\xdb\x0a\x36\x2b\xd5\x5d\xcd\x75\xdd\xfb\x81\xf5\x5f\x32\x9b\x5c\xf9\xf7\x73\x0f\x9d\x2b\xc6\xd1\x5d\xe9\xf5\x58\xac\xc8\xfc\xb6\xfd\xa1\x00\x4b\x80\xee\x6b\x34\x07\x64\xcd\x2c\xf8\xd3\x2c\xa1\x1b\x84\xdf\xbd\x87\x0a\x0e\x6c\x98\x28\xa8\x05\xa5\x36\x4d\xfe\x1c\xd2\xcd\xcd\xcc\x41\xf9\xcc\x50\x74\xdb\x7d\x99\x83\xc7\x5c\xbd\x6a\xb1\x07\xd7\x7e\xe4\x8c\x8e\x4a\xee\x8a\x07\x3c\xd6\xb6\x4a\x33\x0d\xc3\xfa\x00\x38\x06\x5b\x08\x4f\xa8\xea\x4d\x23\xca\x16\x8d\xe7\x90\xeb\x75\x3e\xa5\xdb\xb9\x41\x97\xad\x76\x61\xda\xd2\x66\x42\x61\xc4\x20\x0e\x18\x57\x35\xfe\xea\x88\x42\xf5\xe0\xa0\xef\x24\x5a\x13\x5b\x7d\xc5\x8f\x60\xa0\x26\xc0\x94\x2f\xa4\x46\x69\x14\xff\x09\xf5\x42\x3a\x14\x32\xc4\x3d\xe7\xd6\x43\x35\xf2\xc2\xbf\xf0\x0e\x41\x0b\x95\x79\x0e\x3b\x89\x50\xf7\x62\x36\xc9\x75\x22\x40\xed\xe2\xe4\xe9\x71\xb8\x8a\x46\x9f\xca\x42\x24\x3c\x3d\x0b\xcf\x9d\x2b\x46\x43\x40\x1c\x0b\x00\xe6\x51\x52\x42\xbd\xf2\x01\x16\x59\xb5\xa0\x1a\xb9\xc9\xa6\x0a\x5c\xc4\x1b\xb7\xa4\x91\xa1\x89\x19\x15\x9e\xc0\x8c\x22\xf4\xfb\xe9\x19\x37\x9b\x98\x19\x4b\xce\x06\xf6\xa4\x8c\xf4\x35\xbd\x62\xfe\x07\x06\x94\x63\xae\xf6\xe2\x4c\xc5\xa5\x6f\x73\x0c\x5c\x1a\x02\x42\x5d\xd8\xb5\xa3\xc7\xc0\xc4\x9f\x29\xd2\x21\xc2\xfb\x74\xe7\x5c\xba\xf2\x7d\x33\xdd\x19\xa0\xc8\x61\xe2\xf8\x62\x0d\x04\xec\xd3\x57\x68\xa8\x42\xa7\x42\x36\xab\xf7\xa8\xc4\x98\x70\x7f\x6a\x99\x5f\x35\x38\x14\xb3\xb2\x13\x36\x17\xe3\xb4\xcd\xf7\x5b\x5a\x42\x33\x56\xf7\xd2\x26\x1e\xc4\x91\x3e\xde\x6d\xfb\x9b\x9b\x29\xce\x0e\x7c\x9b\x96\x43\x59\x25\xed\xe9\x27\x08\xdc\x81\xaf\xe5\x31\x7b\x75\xc5\x35\xb2\xeb\x2a\x85\xe
5\x90\xdb\x2e\x40\x02\x4f\x87\xb3\x75\x4b\xe6\xa6\x3f\xb8\x20\xaa\x29\xe6\xd1\xef\xa7\x3a\xa9\x48\x17\xee\xe3\x4a\x14\xd2\xe1\x4d\x54\x0d\xb8\x34\xd8\x3c\xd5\xa3\x60\x44\x94\x71\xf5\x77\x97\x14\xdf\x74\xe0\x97\x8f\x3b\x41\x1b\x38\x7d\xf1\x6d\x47\x4b\xc3\xe2\x96\xdf\xda\x53\xfa\x4c\x04\x64\x4a\x22\x10\xfa\xf9\x8b\xa4\x64\xcf\x9f\xa7\x18\xe6\x14\x11\x0a\x77\x14\x5e\xd2\x3d\x68\xc5\x11\xe0\xc3\x0c\xa7\x9f\x2c\x39\x00\x7f\x7a\x92\xcc\xeb\x78\x7c\x60\x6c\x86\xc8\x67\x57\x8f\x1e\xdf\xdf\x1f\xe8\x92\x59\xfe\x23\x1c\x7b\xfb\xce\x12\x6e\x39\x2c\x05\x72\xd7\xb4\xeb\x2b\xfa\x8f\x55\x2b\xd8\xe3\xa9\x9b\x97\x00\xae\x58\xbc\xa6\xbe\x5e\x25\x5d\x99\x66\x6f\x26\xd5\x1a\x40\x7a\xa3\xe9\x11\x21\x43\x17\xa2\x5b\x01\xb7\x0e\xc1\xc4\xef\xd1\x74\xe3\xde\x82\x66\x2a\xab\xe7\x81\x75\x5b\x48\x9a\x52\xf4\xd4\xb0\x65\xb9\x94\x67\x17\xba\xbc\xd3\x0e\xc8\x37\x22\xf7\x12\xcb\xd2\x47\xfc\xcd\x5b\x2d\xe9\xef\xb2\x58\xaa\xf6\xea\x20\xbb\xfa\x6a\xf7\x35\x8f\xc0\x5b\x44\x13\x29\xf6\x79\x74\x86\x16\x7b\xb9\x30\x40\x9f\x77\xbb\x27\x07\x38\x87\x67\xc7\x0c\x9d\xeb\x16\x56\x6b\xff\x65\x89\xcd\x7c\x02\x90\x31\xd3\x4f\xa6\xe6\xaf\x3e\x15\x00\x95\x92\x3f\x72\x9c\x74\x31\xdf\x24\x20\xd4\x7b\x4a\xda\x4a\x8f\xac\x55\x1a\xe2\xda\xe2\xee\xb4\x19\x74\x53\xb0\x85\x3c\xbf\x86\x1e\x79\xdc\xc2\x8e\xff\x2b\x09\x30\x34\x45\x3f\x09\xb5\xee\x49\xc9\xbb\x15\xc7\x8d\xc8\xc0\xd5\x0c\x67\xaf\x57\xdf\xaa\x05\x91\xd9\x25\x63\x83\x8e\x7e\x95\x34\xd5\x79\xc7\xdd\xe9\x6a\x72\xa0\x98\x71\x74\xea\x24\xc8\xfb\x9a\x5a\xd8\xe2\x2a\xaa\xe7\xe3\x28\x66\x7d\x12\x40\xe6\xc4\xec\x65\x13\xfd\x18\xb5\x20\xfa\x17\xf3\x38\xd7\xdf\x79\x9f\x5a\x7d\x6e\x09\x94\x79\x96\x4e\xaf\xee\xe8\x1b\x88\x83\x91\x7d\x20\x35\x36\xe1\xfd\xb6\xef\x05\x66\x23\x87\xaa\xe9\x11\x70\xbb\x31\xa2\x62\x63\x0d\xf5\x90\x54\xf4\xb9\x19\x87\xf5\xd1\xe4\x86\xc7\x69\xe5\x82\x87\x3f\xd9\x8f\xca\x2b\x20\x30\xbb\x90\xbc\xff\x0a\x4e\xd0\x74\xe6\x05\xb9\x4b\x09\xd0\x0b\x5a\xe2\x19\x24\x12\x82\xc0\x58\x19\xe2\x85\x2c\xff\x0a\xcb\xb0\x0f\xaf\x52\xc0\xea\xed\x7b\x06\x78\xb5\xbe\xd5\x19\x76\x5a\x05\xc7\x75\xb5\xaf\x56\xf8\x67\xdb\xe2\xf7\x87\x8b\xa1\x0a\x33\xa0\x9d\x12\x86\xef\x85\x88\xa5\xb1\x2c\x0e\xc5\x4d\x6a\x1a\xc5\x75\xfb\xc0\xd3\x5e\x25\x01\x8e\xf5\xf8\x22\x41\x94\xd6\x34\x08\xad\xc1\x24\xc1\x34\x62\x83\x79\xf0\x37\xa8\x8c\xaf\x4e\xda\x06\xf3\x8c\x9e\xc4\xe8\x49\x14\x6e\x5e\x58\x41\xf5\x4d\x52\x87\x44\x5c\x9e\x0a\xdd\xac\xbf\xae\x8f\x9d\xde\x73\xb5\xd8\xed\xcc\xe2\x5c\xd8\x55\x91\x9f\x5f\x88\xa4\x65\x73\x8d\x31\x78\x71\xdf\x96\xb1\xa7\x59\x04\x75\x37\xc3\x61\x79\x77\x65\xfd\x24\x53\x5f\x38\x4b\x12\xec\x1f\xd1\xe8\xad\x21\xd5\x1e\x44\xc7\xc7\x21\x95\xf5\x13\x11\x96\x24\x1b\x8e\xd3\x2c\xb3\x22\xd2\xab\xb9\x96\x8a\xab\x7b\x6c\x8e\xbb\x9b\x0c\x15\x2c\xcb\xcd\x18\x61\xe2\x6a\xeb\x03\x10\x5c\x1d\x15\xd7\x05\xdf\xaf\x23\xec\x1d\x9b\xb7\x52\x2e\x69\x7f\xfa\x20\x06\x28\xeb\xcb\xb6\xd5\x55\x8d\xa0\xa4\xb9\xd8\xb3\x39\xb5\x30\xd4\x20\xe1\xf3\xf2\x42\xea\x86\xee\xa3\x43\xf2\x29\x9e\x61\x93\xe9\xee\xc9\xec\xb7\x23\x16', 2)
2,169
6,447
0.750884
1,614
6,507
3.018587
0.164188
0.023399
0.024015
0.019704
0.008005
0.004926
0.004926
0
0
0
0
0.311067
0.001537
6,507
3
6,447
2,169
0.438818
0
0
0
0
0.333333
0.984937
0.984937
0
1
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
1
1
1
null
1
0
0
0
0
0
1
0
1
0
0
0
0
11
8fcb6883a3b20462aaa7ce787045ac24e1addbc9
11,396
py
Python
brywidgets/images.py
andy31lewis/brywidgets
2ff3ffe633cbf226208d7af95cc0eb542173be2b
[ "BSD-3-Clause" ]
1
2020-10-21T12:26:18.000Z
2020-10-21T12:26:18.000Z
brywidgets/images.py
andy31lewis/brywidgets
2ff3ffe633cbf226208d7af95cc0eb542173be2b
[ "BSD-3-Clause" ]
2
2020-04-22T12:04:52.000Z
2021-01-24T15:18:51.000Z
brywidgets/images.py
andy31lewis/brywidgets
2ff3ffe633cbf226208d7af95cc0eb542173be2b
[ "BSD-3-Clause" ]
1
2018-12-10T00:56:39.000Z
2018-12-10T00:56:39.000Z
whitemask_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAADHUlEQVR4nO3UAQ3AQBDDsP74c74BiS2FQt7dbduT1OsbkGUAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEGYAEPYDIJIF/AcNZY8AAAAASUVORK5CYII=' blackmask_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAADIklEQVR4nO3UsQ3AMADDMAfo/y+3S3/IIBLw7kkbkHX+AUHPBACyBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCBADCzrb39gngjg8B5QX0c+LYJAAAAABJRU5ErkJggg==' file_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAQAAAC1+jfqAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAC4SURBVCjPdZFbDsIgEEWnrsMm7oGGfZrohxvU+Iq1TyjU60Bf1pac4Yc5YS4ZAtGWBMk/drQBOVwJlZrWYkLhsB8UV9K0BUrPGy9cWbng2CtEEUmLGppPjRwpbixUKHBiZRS0p+ZGhvs4irNEvWD8heHpbsyDXznPhYFOyTjJc13olIqzZCHBouE0FRMUjA+s1gTjaRgVFpqRwC8mfoXPPEVPS7LbRaJL2y7bOifRCTEli3U7BMWgLzKlW/CuebZPAAAAAElFTkSuQmCC' login_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH5AQRDyIA8MpkEwAAAZNJREFUWMPtlr1Ow0AQhL87m/w8AKFIS4fEiyNAQElBgWhCAaI0YBAFD0AiIXBi00ykU3IW2Nw5FKy0Suyzb2d3dscH/7ZuFhgDx8AUKIFFQJ9q7zFgjAfANvAKpAr+GTA5A/T0OxeINTsHKuAIGApISB8Ch4px5iv/TIvL6xgUG8V4W13sq+RVB71WAZX9Cx0fwg6ACTDQnknbjQbAewsKbvVOJn7tTynwLfTlTaq4BeSa8yd1+3dAqlC9Zp2yP2i+MyXRCQAXSAo8qxKPzv2kKwAuHXOB6IsOLwDjNF8ZUG4NcAfsipY9oPAAAOBSi/NAXsg/REUJXNdVINVDaSSdKTtSVa9NBCCvSTBaE1ppwUIT0asZx+AAjD61mXrhXpknsQEkzqxnyjx3grcWItMgc1eKc13bNgAGLUtzo4Zblv3HH6PUc7NQBk3sRYq3LyCmLZ+/PRE1CRzlRFS1mV3XihXNjnFkcxtzZj3SeaX/J5EAGO0NcOF7YCQhKZ2JCO2lYuzU0TISylgAThVj46fyzdsXtrrGy/mieuYAAAAASUVORK5CYII=' save_b64 = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4gcCEA0AJvnZjwAAAXdJREFUWMNj/MzwlZEBBXAjmF8Z0KS+YlXHy8QEZ3/+948BF0BV95mBgYGBgYlhgMGoAxjR0wAvAzdKJNrb21NkwcGDB9G8DEkIsDTAgk3Tmr2/4Oyd874yzFoiQJblaTEfGHL3QhLrtet/GepyODHUsJyfzM2y/TnDf7hIGwNcAwMDA8OXLxcYPn8yIssBX75cYLh23YKBgYGB4fHxHwwMDAwMEyf+Y2FgYGA4D81Fo4lw1AGjDhh1AAshBZquFgzpcSfIMlzT1YJyBxBrENkhcP85A6MGAwMjvYL8xXOIXfeHTBqAgSPbU0ky2MZzNnUdwMDAwFBXU4xVvKmll2HNsqlwfkhU9mg5MOqAIV4UX7v+F96GQwZ37tzHadCla/dQ+Nj0Y22WL6xiYEMXjG9j+EkrH1dWMbAzMDAwaOALgYVQRejgwwtE61lAgvTi+wbWKOBjYJZBErj1CWGJGh+qJTJ8CPYTZHEcFj5B42sgc56O5gIIAADUZGN7H3Li0AAAAABJRU5ErkJggg==' open_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4gcCEAg2lDS4UwAAAw1JREFUWMPtl0FIFFEYx//zxt2l3TYESddD6KHWxb2UoXgpCBSShGrLUynRsUgiAwkJgsBLIGTsKTrYUgSKqKdAD4E32cxDu6kVZeSmrNnmOkvs7ryvw87OzuqMO+qKF99l5r157/v9v+9975sZIS5RZSIB3Wa3J35pepXaZ05nYkntxHPjcYe+LSfL78d55sqwz+1AwL4LKCmWoZVwmJfV1yPs8apjtbMhrEPa0klhwAfxqAfCbuAtvUhpwdpWOxtC/zNYAaDJ701O3A5ZAeBUMSMQ9nhRMfACU6ONlsmzufHrd72psMeLzjuhpJHAouTAgi9kmRpttGwcz3prBNeNQHXHfGpoeNxwwVVfM76/dFv2LAmHhsfx8MEt3cmS1IbuniguOMwZb/J7k9sSUN0xnwqq3o8ZLqrumE+ZiUJ2C0yfguBhfyrjvTFckgLo6z9X0PsrvmZMjLsNBez4FDgc7bjXGSg4r7snCvfxPSpEDkf7ls8lKWDa1p6V4p+Li0Uuxavm4UtzXgARcwJipWCzkyAAONRiDP86N43X78pNSijH6bpy/I37Nx3D5eUoGhyPbN9MR0CBd/VFMDJYt+utudQ2gwa32S3Ig7cWTC4iAcQZiDNwzgDOQGkRXGaI/TiGrr4Ieh93IRh4bkKAWTgJ4JyBuAAulwBcAMmiKoLkEqxFKjRwj61wEhaAEwkgmYFI8VRmIC6CpxmIMh6TLIJkBmmlzBCuL2B1TBeuDS9xlhEgZ2CcREBmKphT5vovdkSB30cwUGMzdQy18PX4KxCJeVAuZ/uiKkANd1pU5yUTdhU+8aTGWurS/+jZJCALX4u9AecWxbOsYVHZVxEkC+CUEUFpZQsUoXLSkgc3XYjeT89gZLAVsegQiNvA00yBKCHPQtL5EeCkZLzMQCSYhutGQJIC+P3Fu+Nzvh34JgGXL57HtRtvTZdRvbYdOAAIT2/CsvQ5U4oBwHVi+1/IH3Te8xubYRJaXRDPuHIDH//kxDhP5hZVLRgLsFbl7jW3iNpz6xOfcna18w5+zf4DPXKrvrM0c5AAAAAASUVORK5CYII=' uparrow_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH4gYSDiAYbn7rYwAAAahJREFUOMu1kz1LHGEQx//z8uzeeXfGezGgW1gELFQkYJsmBgQbO4srkhQpBD9CvoSlXZqAH8AXUOzPQgwpkkJMCgs1t3rooXfmbp/dx8KAxHhrYwammZc/zG9mgP9pwxiVx2q4V2LgTanaWQzts6nB8TQBfbB5plzNvqYVfziP62L7G+H5xMVe+P2hWrofKM5WqrlpWfGH+kAASBjt/Su0NtzExe6/In/NWJ6vvM+8cp/zI0UQM1gFsY2BgoMrtRf9zMDa9c/WSU8G3PHOO1u6lNgIxiicTZBEFnEjht3OLRAjkwrxdPV4Fco7LAIWRWwjqCqEBc7SemOzXnt8CwyICuAc1BiwUbBh4OqO9+DcUMoWBMQiEGaoKogAE+QQv708LERBvRva+q/lk6meAmK5RSCI8ZDECZgIRITyZEWZJWh8OfPSR2j21br1CJ6XgaqB/HEVhYiAhV2qQFg7OG9+bX6ytotsvh+q5hakMbdMhFzqJQbvAhwtH31wHfzoHyt89EvZPBsHuAQAYFs2m3qJ963y8kUOpd9FInaIAfK5G24dnj7Zx94ADimDkToKgnYAAAAASUVORK5CYII=' saveas_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4gcCEAs4WKHGlwAAAZpJREFUWMNj/MzAwMiAAqpR+LxM7f9h7Mp/lXC5diRxZICsBmJcK5xZhSwOFWZiGGAw6gBG9DTAy8DwD5lvb29PkQUHDx5E4X+uZmBGTgMs2DSt2fsLzt457yvDrCUCZFmeFvOBIXcvNwMDAwPDtet/GepyODHUsJyfzMByHlkklwGugYGBgeHLlwsMnz8ZkeWAL18uMFy7bsHAwMDA8Pj4DwYGBgaG6/8hnr6+GE8I4AK8/PxEqfv88SPRZpLkgEuKKkSpUyTBTJIcoHjh7PDLhlRNA6TEPU3SgCKtQ2BkpgFNVwuG9LgTZBmu6WpBnSggxiCyQ+D+cwZGGSH0RgntwGOoXS+PDbVy4Mj2VJIMtvGcTf1sWFdTjFW8qaWXYc2yqXB+SFT2aJNs1AFDvCi+dv0vvA2HDO7cuY+7prx2D7XAwaIfa7N8YRUDGw9SSXjmHcP/9jaGn7Ty8doeBg4GBgaGu3fxhEBlFQM7jK2BJH4DiW1CpeKbRYaPgZnhD0LAhQ9Pe4CbsKX3vzL8x6nnBUL8r9ZoLoAAAM8Qb3qVzu5AAAAAAElFTkSuQmCC' closebutton_b64 = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAAA9CAQAAAC33KdKAAAAAmJLR0QA/4ePzL8AAAAJcEhZcwAACxMAAAsTAQCanBgAAAAHdElNRQfkBA8JKxSPmVc5AAAD3UlEQVRYw+2ZTWhcVRTHfy9IbSwiSbGgpbW6EqW40KhQRGrrx6Yoii4rxU0LiuLCrSgIKtRFF4K6KEjFgoVSR0q/FIQagy0ltjYxVNsUpdPEiZMYUzOZTP4ukmmSN+e+e+/Mm4XgvYtM3jvv/M4997x7zr0Pmmx9qf+LTepJmnlInckaurmTu+gGahS5QJGSRhLBIZ6mDW0PILiZ7VxEzl7gPla2Af86wBbK1DLg9V6jV2vytuAV/gxAL+0/czfsbJW7G+AhfomE1/sxJXlMvlrqz8HepuFdDLWIF2IPbGoKf3sO8Pnep2RrDPm1fPFCXIpdalYxk6sB4leI8cJfOeOFOEKII3YjON8GvBAvhY3+ZecKNxUIcvvvDj9+vePROZU1FBQZNVU57rg3BGWPAd/aj6okSRpkzmeA/pEkDjjub89OOY9m4gNM0ExdkH2mRCV7/L0evCQNUHXiK0sF+cKUesPB/g3uMed+OX7eCxVz7q+lBTlqyF12j99y2rSsNsh0Sm5Wk5Ygo4bOnkmHAeOmy74xTRhY9kbMasLE/2Fq/NrG9zhD67BpwvnFcHSMvujQNwqFNP0r+DQjuo86TJjHX4vCC6nT8sBA5vv9ncsETQfP/WJ/1TJgzLPEnDFNGDPxPl191jbDv8jaJjTiy15Npf0NFUBXUJo5GYAfC9Az0dNgwIOhWd2DHw3SMlXfk3Vc36PdGliqPJkUMnZ6RcL0dDT8WPLL17Ylxx34EW6LrXwXsdWIivER+7JK8aX3dQP0ezD+b8f2M+Fe/YSClNQ603K6ISwIVfa8BOeYDdAz/rxxvFD1PjZnLzupdjZgD3110HDLFe/ox8IWIp31GnDQiv0rnrmfoNu4fMa4tlHnPDGwr+HKZ/BW5ujNhKurSMccsZDlAXOEt0TjSwt3eyMnYtjlmAsO/LgJWJJydDrKhE9cBrwTWJSm8Ajpe1PqR+ulVJfz+M0sSitu5y9TeyIwNZ/Mis6PDBNOqZrSOuKYqi+DUvMTB130p2Ctqfr0Mi9k1XoFL/6Ub5V+21EL1QtPT75XvYifcxQmD2TSd6IbHXu/fk1JCjgt1A+Sag78xyGZaqNDdb8qgQlryHGsWdHKHUHJ8sM2nZCsD6LvBRhsA/6FuFJlOGf8u/BYOP1x1JGrF96DdXEO2II6OJITfhdMxpeLWwHebxk+w+amz6ovATwcuNWQYw1dkcfnis+DCs10L/Ms7GidvnCudzgKXmNX/h+ObuLFIE/0cz8rWvlM4Whvzv/ZwDPsNw9tL/IBm1gNMTEf/U2nwLaFjUyyjtWsAsSERpMi/7f/ZvsXgZFyMZwaSZIAAAAASUVORK5CYII=' hues_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAAwCAYAAAD+f6R/AAABIklEQVR4nO3aMW7DMBBE0ZWS+P7nDZIohXubjbAk5z1gLzDFhwDxuKquelStcH8fVd+1xl312T/Y0H31jzV452//WqN31E//YAN3FhBLACCYAEAwAYBgAgDBBACCCQAEO+uoWuWuWufax9pw1AnWGr72sQbPFwAEEwAIJgAQTAAgmABAMAGAYAIAwbwDuOGeJhhsp1EXegfwNMFg3gEArwgABBMACCYAEEwAIJgAQDABgGDeAdxw7UPtOOpS7wAmGMs7AOAdAYBgAgDBBACCCQAEEwAIJgAQbJ13ANX9x9Q7gOZRu5fyDgDYiwBAMAGAYAIAwQQAggkABBMACLbMO4D+P6beAXTeBEt5BwDsRQAgmABAMAGAYAIAwQQAggkABPsHW+IBkGJm7agAAAAASUVORK5CYII=' minus_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAADzZAAA82QHNHPbHAAAAB3RJTUUH5AQPCQkWxiVwNQAAAGlJREFUeNrt2LERgCAQRUHObuyA/iM7sByIzmFs4e9GEPKA5MYAAAAAACBHnZt5r5Vy8Oet+gIkHfwf4Yr/Aqm33+JfgAACCCCAAAIIIIAAAgggQOY8oCcjsfOAXqTNBdIvHgAAAAAg0wbIrBHGR5+81AAAAABJRU5ErkJggg==' copy_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4gcCEzsDNKqnGQAAAZpJREFUWMPtlz9Lw0AYh3+RbtLQ4tfwQ/gRpDiULh0UBKlQKIgoSJdmc1MnHfyDdauLYDfBVcQPIE4Wl0LbC6ZY7+51kBTT2OaSS8ziOyWBlzw8d+8vF+MtO480aw4pV+oAmckH7aNz6vXZ1IZ8zsTD4xN2j/eNRAAAoFopT204vWjBqtewA1AcEJGWIGuasOo1NNZqlNoeiAtCaxPGAZEJ29DrM6yubyc3BUFVrZRhM++UnJy1gGYCAKXlO8/95fXSWP2fGHBfOGsk3cy4sg4pSmaEMvCz7JdnrGwtaGdGZAPd7iJubu+VJ2UahLaBMOP6G0TiBoIgfACOM4zNgEpm+AAGzI7NgEpm+ABs+z3SPKtmRj5nzjbAmI2P0QgAQFJACoKQEiAJSQRJEiRl6MxQXgLHGWJjcy+w8bXTQalYUNovLiARBQM0mgfKh4x2sUBhDAgh9D9GUTLDBUwUQMUA5zxeANXMGBvgwtOjDaCaGWMDknt6tI9kYTODf3Jfj5YB1cyQ8vvaMAywCQPae0AlM2aV8f9zmjbAFxVWCezT1eQrAAAAAElFTkSuQmCC' circle_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAARElEQVR4nI2QwQkAMQzDrO6/s+7VkpYLxE8h4mDUAKaJSpKQxApq9gGV1UkvX13lm5EIOBLPj+Pqbp7KGe+otwP4N9cH/OwpBV7e2E4AAAAASUVORK5CYII=' plus_b64 = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAADzZAAA82QHNHPbHAAAAB3RJTUUH5AQPCQYbPwwQRwAAALZJREFUeNrt20EOQDAQBVDjNm7g/is3cJxaSbBEmzbz/spGYp7q4qdiapB1KeXNfdseUfvZosfBW0JE78PXRpin5AEAAAAAAAAAAAAAAAAAAAAAAACQLbee7e8er+ecHWNkG/yJ4BPI+vZtggAAAAAAAAAAAAAAAACQNdseMbc4i9d1H3BeZOsFbpVYjTgnaBMEAAAAAAAAAAAAAAAAAAAAAABIBjBK2TrEChjy5+lrvhSktVfSAYIsKVsYNMYvAAAAAElFTkSuQmCC' folder_b64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAGrSURBVDjLxZO7ihRBFIa/6u0ZW7GHBUV0UQQTZzd3QdhMQxOfwMRXEANBMNQX0MzAzFAwEzHwARbNFDdwEd31Mj3X7a6uOr9BtzNjYjKBJ6nicP7v3KqcJFaxhBVtZUAK8OHlld2st7Xl3DJPVONP+zEUV4HqL5UDYHr5xvuQAjgl/Qs7TzvOOVAjxjlC+ePSwe6DfbVegLVuT4r14eTr6zvA8xSAoBLzx6pvj4l+DZIezuVkG9fY2H7YRQIMZIBwycmzH1/s3F8AapfIPNF3kQk7+kw9PWBy+IZOdg5Ug3mkAATy/t0usovzGeCUWTjCz0B+Sj0ekfdvkZ3abBv+U4GaCtJ1iEm6ANQJ6fEzrG/engcKw/wXQvEKxSEKQxRGKE7Izt+DSiwBJMUSm71rguMYhQKrBygOIRStf4TiFFRBvbRGKiQLWP29yRSHKBTtfdBmHs0BUpgvtgF4yRFR+NUKi0XZcYjCeCG2smkzLAHkbRBmP0/Uk26O5YnUActBp1GsAI+S5nRJJJal5K1aAMrq0d6Tm9uI6zjyf75dAe6tx/SsWeD//o2/Ab6IH3/h25pOAAAAAElFTkSuQmCC'
712.25
1,510
0.963233
350
11,396
31.32
0.748571
0.009579
0.01642
0.020525
0.069057
0.055647
0.044335
0.044335
0
0
0
0.096555
0.003949
11,396
15
1,511
759.733333
0.869175
0
0
0
0
0.866667
0.979115
0.979115
0
1
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
1
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
7
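The brywidgets file above embeds its toolbar icons as 'data:image/png;base64,...' URIs assigned to module-level constants. A small sketch, using only the standard library, of how such a URI decodes back to PNG bytes; the helper name and the commented usage are illustrative, not part of the original module:

import base64

def data_uri_to_bytes(uri: str) -> bytes:
    """Split a 'data:image/png;base64,...' URI and decode the payload."""
    header, _, payload = uri.partition(',')
    if 'base64' not in header:
        raise ValueError('not a base64 data URI')
    return base64.b64decode(payload)

# e.g. write one of the embedded icons back out as a file:
# with open('circle.png', 'wb') as f:
#     f.write(data_uri_to_bytes(circle_b64))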
8ff2eaa72868fab3cee8788194fcefaf48dafb08
13,570
py
Python
src/executor/clients/geth.py
marioevz/kintsugi_testing
49d2275f16f8d9f2a1160fe05690f8a5c44337d3
[ "MIT" ]
4
2021-11-13T16:20:18.000Z
2021-12-21T20:36:04.000Z
src/executor/clients/geth.py
marioevz/kintsugi_testing
49d2275f16f8d9f2a1160fe05690f8a5c44337d3
[ "MIT" ]
null
null
null
src/executor/clients/geth.py
marioevz/kintsugi_testing
49d2275f16f8d9f2a1160fe05690f8a5c44337d3
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import re # Client specific messages: MESSAGES = { 'UnknownHeader': 'unknown header', 'UnknownPayload': 'unknown payload', 'FinalizedBlockHash_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'FinalizedBlockHash_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'FinalizedBlockHash_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ForkchoiceStateV1.finalizedBlockHash of type common.Hash', 'FinalizedBlockHash_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ForkchoiceStateV1.finalizedBlockHash of type common.Hash', 'HeadBlockHash_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'HeadBlockHash_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'HeadBlockHash_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ForkchoiceStateV1.headBlockHash of type common.Hash', 'HeadBlockHash_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ForkchoiceStateV1.headBlockHash of type common.Hash', 'SafeBlockHash_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'SafeBlockHash_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'SafeBlockHash_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ForkchoiceStateV1.safeBlockHash of type common.Hash', 'SafeBlockHash_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ForkchoiceStateV1.safeBlockHash of type common.Hash', 'Random_TooShort': 'invalid argument 1: hex string has length 62, want 64 for common.Hash', 'Random_TooLong': 'invalid argument 1: hex string has length 66, want 64 for common.Hash', 'Random_Odd': 'invalid argument 1: json: cannot unmarshal hex string of odd length into Go struct field PayloadAttributesV1.random of type common.Hash', 'Random_Prefix': 'invalid argument 1: json: cannot unmarshal hex string without 0x prefix into Go struct field PayloadAttributesV1.random of type common.Hash', 'FeeRecipient_TooShort': 'invalid argument 1: hex string has length 38, want 40 for common.Address', 'FeeRecipient_TooLong': 'invalid argument 1: hex string has length 42, want 40 for common.Address', 'FeeRecipient_Odd': 'invalid argument 1: json: cannot unmarshal hex string of odd length into Go struct field PayloadAttributesV1.feeRecipient of type common.Address', 'FeeRecipient_Prefix': 'invalid argument 1: json: cannot unmarshal hex string without 0x prefix into Go struct field PayloadAttributesV1.feeRecipient of type common.Address', 'Timestamp_Prefix': 'invalid argument 1: json: cannot unmarshal hex string without 0x prefix into Go struct field PayloadAttributesV1.timestamp of type hexutil.Uint64', 'Timestamp_LeadingZeros': 'invalid argument 1: json: cannot unmarshal hex number with leading zero digits into Go struct field PayloadAttributesV1.timestamp of type hexutil.Uint64', 'Timestamp_Empty': 'invalid argument 1: json: cannot unmarshal hex string "0x" into Go struct field PayloadAttributesV1.timestamp of type hexutil.Uint64', 'Timestamp_Long': 'invalid argument 1: json: cannot unmarshal hex number > 64 bits into Go struct field PayloadAttributesV1.timestamp of type hexutil.Uint64', 'BlockHash_TooLong': 'invalid argument 0: hex string has length 66, 
want 64 for common.Hash', 'BlockHash_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.blockHash of type common.Hash', 'BlockHash_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.blockHash of type common.Hash', 'BlockHash_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'Coinbase_TooLong': 'invalid argument 0: hex string has length 42, want 40 for common.Address', 'Coinbase_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.coinbase of type common.Address', 'Coinbase_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.coinbase of type common.Address', 'Coinbase_TooShort': 'invalid argument 0: hex string has length 38, want 40 for common.Address', 'ExtraData_TooLong': 'invalid extradata length: 33', 'ExtraData_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.extraData of type hexutil.Bytes', 'ExtraData_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.extraData of type hexutil.Bytes', 'LogsBloom_TooLong': 'REPLACE', 'LogsBloom_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.logsBloom of type hexutil.Bytes', 'LogsBloom_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.logsBloom of type hexutil.Bytes', 'LogsBloom_Short': 'REPLACE', 'ParentHash_Exec_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'ParentHash_Exec_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.parentHash of type common.Hash', 'ParentHash_Exec_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.parentHash of type common.Hash', 'ParentHash_Exec_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'Random_Exec_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'Random_Exec_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.random of type common.Hash', 'Random_Exec_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.random of type common.Hash', 'Random_Exec_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'ReceiptRoot_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'ReceiptRoot_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.receiptRoot of type common.Hash', 'ReceiptRoot_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.receiptRoot of type common.Hash', 'ReceiptRoot_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'StateRoot_TooLong': 'invalid argument 0: hex string has length 66, want 64 for common.Hash', 'StateRoot_Odd': 'invalid argument 0: json: cannot unmarshal hex string of odd length into Go struct field ExecutableDataV1.stateRoot of type common.Hash', 'StateRoot_Prefix': 
'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.stateRoot of type common.Hash', 'StateRoot_TooShort': 'invalid argument 0: hex string has length 62, want 64 for common.Hash', 'BlockNumber_Empty': 'invalid argument 0: json: cannot unmarshal hex string "0x" into Go struct field ExecutableDataV1.blockNumber of type hexutil.Uint64', 'BlockNumber_LeadingZeros': 'invalid argument 0: json: cannot unmarshal hex number with leading zero digits into Go struct field ExecutableDataV1.blockNumber of type hexutil.Uint64', 'BlockNumber_TooLong': 'invalid argument 0: json: cannot unmarshal hex number > 64 bits into Go struct field ExecutableDataV1.blockNumber of type hexutil.Uint64', 'BlockNumber_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.blockNumber of type hexutil.Uint64', 'GasLimit_Empty': 'invalid argument 0: json: cannot unmarshal hex string "0x" into Go struct field ExecutableDataV1.gasLimit of type hexutil.Uint64', 'GasLimit_LeadingZeros': 'invalid argument 0: json: cannot unmarshal hex number with leading zero digits into Go struct field ExecutableDataV1.gasLimit of type hexutil.Uint64', 'GasLimit_TooLong': 'invalid argument 0: json: cannot unmarshal hex number > 64 bits into Go struct field ExecutableDataV1.gasLimit of type hexutil.Uint64', 'GasLimit_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.gasLimit of type hexutil.Uint64', 'GasUsed_Empty': 'invalid argument 0: json: cannot unmarshal hex string "0x" into Go struct field ExecutableDataV1.gasUsed of type hexutil.Uint64', 'GasUsed_LeadingZeros': 'invalid argument 0: json: cannot unmarshal hex number with leading zero digits into Go struct field ExecutableDataV1.gasUsed of type hexutil.Uint64', 'GasUsed_TooLong': 'invalid argument 0: json: cannot unmarshal hex number > 64 bits into Go struct field ExecutableDataV1.gasUsed of type hexutil.Uint64', 'GasUsed_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.gasUsed of type hexutil.Uint64', 'Timestamp_Exec_Empty': 'invalid argument 0: json: cannot unmarshal hex string "0x" into Go struct field ExecutableDataV1.timestamp of type hexutil.Uint64', 'Timestamp_Exec_LeadingZeros': 'invalid argument 0: json: cannot unmarshal hex number with leading zero digits into Go struct field ExecutableDataV1.timestamp of type hexutil.Uint64', 'Timestamp_Exec_TooLong': 'invalid argument 0: json: cannot unmarshal hex number > 64 bits into Go struct field ExecutableDataV1.timestamp of type hexutil.Uint64', 'Timestamp_Exec_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.timestamp of type hexutil.Uint64', 'BaseFeePerGas_Empty': 'invalid argument 0: json: cannot unmarshal hex string "0x" into Go struct field ExecutableDataV1.baseFeePerGas of type *hexutil.Big', 'BaseFeePerGas_LeadingZeros': 'invalid argument 0: json: cannot unmarshal hex number with leading zero digits into Go struct field ExecutableDataV1.baseFeePerGas of type *hexutil.Big', 'BaseFeePerGas_TooLong': 'invalid argument 0: json: cannot unmarshal hex number > 256 bits into Go struct field ExecutableDataV1.baseFeePerGas of type *hexutil.Big', 'BaseFeePerGas_Prefix': 'invalid argument 0: json: cannot unmarshal hex string without 0x prefix into Go struct field ExecutableDataV1.baseFeePerGas of type *hexutil.Big', 
}

# Geth Client Specific Methods

def port_num() -> int:
    return 8545

def prepare_genesis(config) -> None:
    # Geth uses a 1:1 copy of the genesis block
    config["genesis_path"] = ''

def prepare_init_command(config) -> list[str]:
    if "genesis_path" not in config:
        raise Exception("Config missing genesis_path")
    if "client_binary" not in config:
        raise Exception("Config missing client_binary")
    if "data_dir_path" not in config:
        raise Exception("Config missing data_dir_path")
    comm = [config['client_binary'], '--catalyst', '--datadir', config['data_dir_path']]
    if 'verbose' in config and config['verbose']:
        comm += ['--verbosity', '5']
    comm += ['init', config["genesis_path"]]
    return comm

def prepare_start_command(config) -> list[str]:
    if "client_binary" not in config:
        raise Exception("Config missing client_binary")
    if "data_dir_path" not in config:
        raise Exception("Config missing data_dir_path")
    comm = [config['client_binary'], '--catalyst', '--http', '--ws', '-http.api', "engine,eth", '--datadir', config['data_dir_path']]
    if 'verbose' in config and config['verbose']:
        comm += ['--verbosity', '5']
    return comm

def detect_start(proc) -> bool:
    # Scan the client's stderr line by line until the HTTP server banner appears.
    start_detected = False
    while not start_detected:
        line = proc.stderr.readline().strip()
        if not line:
            break
        m = re.search(r'HTTP server started', line)
        if m:
            print("Client start detected.")
            start_detected = True
    return start_detected
89.276316
288
0.707074
1,711
13,570
5.537697
0.087084
0.113984
0.101319
0.116095
0.914829
0.906174
0.882955
0.878839
0.858892
0.853509
0
0.028301
0.221444
13,570
152
289
89.276316
0.868528
0.008696
0
0.12069
0
0.431034
0.773887
0.136367
0
0
0
0
0
1
0.043103
false
0
0.008621
0.008621
0.086207
0.008621
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
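The geth client module's detect_start() blocks on the child process's stderr until a marker line appears. A generic sketch of that wait-for-log-line pattern with subprocess; the function name, command, and pattern below are caller-supplied placeholders, not values from the original config dict:

import re
import subprocess

def wait_for_log_line(cmd: list[str], pattern: str) -> subprocess.Popen:
    """Start a process and block until its stderr emits a matching line."""
    proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, text=True)
    for line in proc.stderr:  # iterating a text-mode pipe yields lines as they arrive
        if re.search(pattern, line):
            return proc
    raise RuntimeError('process exited before the marker line appeared')

# e.g. wait_for_log_line(['geth', '--catalyst', '--http'], r'HTTP server started')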
8ff3218b1440e6645a7f1db5416f26fab792c4ad
32,842
py
Python
venv/lib/python3.8/site-packages/azure/mgmt/billing/operations/_billing_role_assignments_operations.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/azure/mgmt/billing/operations/_billing_role_assignments_operations.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/azure/mgmt/billing/operations/_billing_role_assignments_operations.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
2
2021-05-23T16:46:31.000Z
2021-05-26T23:51:09.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from .. import models class BillingRoleAssignmentsOperations(object): """BillingRoleAssignmentsOperations operations. You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: The version of the API to be used with the client request. The current version is 2020-05-01. Constant value: "2020-05-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2020-05-01" self.config = config def get_by_billing_account( self, billing_account_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Gets a role assignment for the caller on a billing account. The operation is supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.get_by_billing_account.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized get_by_billing_account.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleAssignments/{billingRoleAssignmentName}'} def delete_by_billing_account( self, billing_account_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Deletes a role assignment for the caller on a billing account. The operation is supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.delete_by_billing_account.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized delete_by_billing_account.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleAssignments/{billingRoleAssignmentName}'} def get_by_invoice_section( self, billing_account_name, billing_profile_name, invoice_section_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Gets a role assignment for the caller on an invoice section. The operation is supported for billing accounts with agreement type Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param invoice_section_name: The ID that uniquely identifies an invoice section. :type invoice_section_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.get_by_invoice_section.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'), 'invoiceSectionName': self._serialize.url("invoice_section_name", invoice_section_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized get_by_invoice_section.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleAssignments/{billingRoleAssignmentName}'} def delete_by_invoice_section( self, billing_account_name, billing_profile_name, invoice_section_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Deletes a role assignment for the caller on an invoice section. The operation is supported for billing accounts with agreement type Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param invoice_section_name: The ID that uniquely identifies an invoice section. :type invoice_section_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.delete_by_invoice_section.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'), 'invoiceSectionName': self._serialize.url("invoice_section_name", invoice_section_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized delete_by_invoice_section.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleAssignments/{billingRoleAssignmentName}'} def get_by_billing_profile( self, billing_account_name, billing_profile_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Gets a role assignment for the caller on a billing profile. The operation is supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.get_by_billing_profile.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized get_by_billing_profile.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleAssignments/{billingRoleAssignmentName}'} def delete_by_billing_profile( self, billing_account_name, billing_profile_name, billing_role_assignment_name, custom_headers=None, raw=False, **operation_config): """Deletes a role assignment for the caller on a billing profile. The operation is supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param billing_role_assignment_name: The ID that uniquely identifies a role assignment. :type billing_role_assignment_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: BillingRoleAssignment or ClientRawResponse if raw=true :rtype: ~azure.mgmt.billing.models.BillingRoleAssignment or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ # Construct URL url = self.delete_by_billing_profile.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'), 'billingRoleAssignmentName': self._serialize.url("billing_role_assignment_name", billing_role_assignment_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('BillingRoleAssignment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized delete_by_billing_profile.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleAssignments/{billingRoleAssignmentName}'} def list_by_billing_account( self, billing_account_name, custom_headers=None, raw=False, **operation_config): """Lists the role assignments for the caller on a billing account. The operation is supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of BillingRoleAssignment :rtype: ~azure.mgmt.billing.models.BillingRoleAssignmentPaged[~azure.mgmt.billing.models.BillingRoleAssignment] :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_billing_account.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.BillingRoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_billing_account.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleAssignments'} def list_by_invoice_section( self, billing_account_name, billing_profile_name, invoice_section_name, custom_headers=None, raw=False, **operation_config): """Lists the role assignments for the caller on an invoice section. The operation is supported for billing accounts with agreement type Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param invoice_section_name: The ID that uniquely identifies an invoice section. :type invoice_section_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of BillingRoleAssignment :rtype: ~azure.mgmt.billing.models.BillingRoleAssignmentPaged[~azure.mgmt.billing.models.BillingRoleAssignment] :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_invoice_section.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'), 'invoiceSectionName': self._serialize.url("invoice_section_name", invoice_section_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.BillingRoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_invoice_section.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleAssignments'} def list_by_billing_profile( self, billing_account_name, billing_profile_name, custom_headers=None, raw=False, **operation_config): """Lists the role assignments for the caller on a billing profile. The operation is supported for billing accounts with agreement type Microsoft Customer Agreement. :param billing_account_name: The ID that uniquely identifies a billing account. :type billing_account_name: str :param billing_profile_name: The ID that uniquely identifies a billing profile. :type billing_profile_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of BillingRoleAssignment :rtype: ~azure.mgmt.billing.models.BillingRoleAssignmentPaged[~azure.mgmt.billing.models.BillingRoleAssignment] :raises: :class:`ErrorResponseException<azure.mgmt.billing.models.ErrorResponseException>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_billing_profile.metadata['url'] path_format_arguments = { 'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'), 'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.BillingRoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_billing_profile.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleAssignments'}
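Illustrative only: a minimal sketch of consuming the paged list operations above, assuming the surrounding BillingManagementClient and its billing_role_assignments operations group (standard for this generated msrest-style SDK, but not shown in this record).

# Illustrative sketch; client/group names are assumptions from the SDK's usual layout.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.billing import BillingManagementClient

credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
client = BillingManagementClient(credentials, '<subscription-id>')

# The returned BillingRoleAssignmentPaged iterable follows nextLink transparently.
for assignment in client.billing_role_assignments.list_by_billing_account('<billing-account-name>'):
    print(assignment.name)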
49.460843
242
0.687717
3,447
32,842
6.32434
0.05512
0.042385
0.037156
0.034404
0.956284
0.955734
0.955183
0.953624
0.953624
0.953624
0
0.003119
0.228762
32,842
663
243
49.535445
0.857555
0.32635
0
0.827922
0
0.006494
0.177192
0.106374
0
0
0
0
0
1
0.051948
false
0
0.00974
0
0.136364
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
64ffcbc9871bb721155e640aa7a15ced8b86ff0d
21,103
py
Python
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_WCN_TestScript.py
srcarter3/awips2
37f31f5e88516b9fd576eaa49d43bfb762e1d174
[ "Apache-2.0" ]
null
null
null
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_WCN_TestScript.py
srcarter3/awips2
37f31f5e88516b9fd576eaa49d43bfb762e1d174
[ "Apache-2.0" ]
null
null
null
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_WCN_TestScript.py
srcarter3/awips2
37f31f5e88516b9fd576eaa49d43bfb762e1d174
[ "Apache-2.0" ]
1
2021-10-30T00:03:05.000Z
2021-10-30T00:03:05.000Z
## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: Raytheon Company # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## # ---------------------------------------------------------------------------- # This software is in the public domain, furnished "as is", without technical # support, and with no warranty, express or implied, as to its usefulness for # any purpose. # # Headlines Timing # # Author: # ---------------------------------------------------------------------------- scripts = [ { "commentary": "Clear out all Hazards Table and Grids.", "name": "WCN_0", "productType": None, "clearHazardsTable": 1, "checkStrings": [], "decodeVTEC": 0, }, { "commentary": "Testing midnight issuance - 3 hr event.", "name": "WCN_1", "drtTime": "20100101_0510", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 3, "TO.A:0111", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 111", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.NEW.KTBW.TO.A.0111.100101T0510Z-100101T0800Z/", "The National Weather Service has issued Tornado Watch 111 in effect until 3 AM EST early this morning for the following areas", "In Florida this watch includes 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.NEW.KTBW.TO.A.0111.100101T0510Z-100101T0800Z/", "The National Weather Service has issued Tornado Watch 111 in effect until 3 AM EST early this morning for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing continuation.", "name": "WCN_2", "drtTime": "20100101_0530", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 3, "TO.A:0111", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 111", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.CON.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "Tornado Watch 111 remains valid until 3 AM EST early this morning for the following areas", "In Florida this watch includes 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.CON.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "Tornado Watch 111 remains valid until 3 AM EST early this morning for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing expire before expire time.", "name": "WCN_3", "drtTime": "20100101_0745", "productType": "Hazard_WCN_Local", "decodeVTEC": 0, "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 3, "TO.A:0111", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 111", "National Weather Service Tampa Bay 
Ruskin FL", "FLC017-", "/E.EXP.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "The National Weather Service will allow Tornado Watch 111 to expire at 3 AM EST early this morning for the following areas", "In Florida this allows to expire 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.EXP.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "The National Weather Service will allow Tornado Watch 111 to expire at 3 AM EST early this morning for the following areas", "This allows to expire the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing expire after expire time.", "name": "WCN_4", "drtTime": "20100101_0815", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 3, "TO.A:0111", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 111", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.EXP.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "The National Weather Service has allowed Tornado Watch 111 to expire for the following areas", "In Florida this allows to expire 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.EXP.KTBW.TO.A.0111.000000T0000Z-100101T0800Z/", "The National Weather Service has allowed Tornado Watch 111 to expire for the following areas", "This allows to expire the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing new issuance of SV.A", "name": "WCN_5", "drtTime": "20100101_0900", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 7, "SV.A:0112", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 112", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.NEW.KTBW.SV.A.0112.100101T0900Z-100101T1200Z/", "The National Weather Service has issued Severe Thunderstorm Watch 112 in effect until 7 AM EST this morning for the following areas", "In Florida this watch includes 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.NEW.KTBW.SV.A.0112.100101T0900Z-100101T1200Z/", "The National Weather Service has issued Severe Thunderstorm Watch 112 in effect until 7 AM EST this morning for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing cancel of SV.A", "name": "WCN_6", "drtTime": "20100101_1100", "productType": "Hazard_WCN_Local", "createGrids": [ ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 112", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.CAN.KTBW.SV.A.0112.000000T0000Z-100101T1200Z/", "The National Weather Service has cancelled Severe Thunderstorm Watch 112 for the following areas", "In Florida this cancels 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.CAN.KTBW.SV.A.0112.000000T0000Z-100101T1200Z/", "The National Weather Service has cancelled Severe Thunderstorm Watch 112 for the following areas", "This cancels the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing new issuance of SV.A", "name": "WCN_7", "drtTime": "20100101_1205", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", 
"all"), ("Fcst", "Hazards", "DISCRETE", 0, 12, "SV.A:0115", ["FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 115", "National Weather Service Tampa Bay Ruskin FL", "FLC017-", "/E.NEW.KTBW.SV.A.0115.100101T1205Z-100101T1700Z/", "The National Weather Service has issued Severe Thunderstorm Watch 115 in effect until noon EST today for the following areas", "In Florida this watch includes 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.NEW.KTBW.SV.A.0115.100101T1205Z-100101T1700Z/", "The National Weather Service has issued Severe Thunderstorm Watch 115 in effect until noon EST today for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing EXA SV.A", "name": "WCN_8", "drtTime": "20100101_1300", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 12, "SV.A:0115", ["FLC017","FLC053","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 115", "National Weather Service Tampa Bay Ruskin FL", "FLC053-", "/E.EXA.KTBW.SV.A.0115.000000T0000Z-100101T1700Z/", "The National Weather Service has extended Severe Thunderstorm Watch 115 to include the following areas until noon EST today", "In Florida this watch includes 1 county", "In west central Florida", "Hernando", "FLC017-", "/E.CON.KTBW.SV.A.0115.000000T0000Z-100101T1700Z/", "Severe Thunderstorm Watch 115 remains valid until noon EST today for the following areas", "In Florida this watch includes 1 county", "In west central Florida", "Citrus", "GMZ870-", "/E.CON.KTBW.SV.A.0115.000000T0000Z-100101T1700Z/", "Severe Thunderstorm Watch 115 remains valid until noon EST today for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing CON after EXA SV.A", "name": "WCN_9", "drtTime": "20100101_1400", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 12, "SV.A:0115", ["FLC017","FLC053","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 115", "National Weather Service Tampa Bay Ruskin FL", "FLC017-053-", "/E.CON.KTBW.SV.A.0115.000000T0000Z-100101T1700Z/", "Severe Thunderstorm Watch 115 remains valid until noon EST today for the following areas", "In Florida this watch includes 2 counties", "In west central Florida", "Citrus Hernando", "GMZ870-", "/E.CON.KTBW.SV.A.0115.000000T0000Z-100101T1700Z/", "Severe Thunderstorm Watch 115 remains valid until noon EST today for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing EXT, EXB after EXA SV.A", "name": "WCN_10", "drtTime": "20100101_1500", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 14, "SV.A:0115", ["FLC101","FLC017","FLC053","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watch 115", "National Weather Service Tampa Bay Ruskin FL", "FLC101-", "/E.EXB.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "The National Weather Service has extended Severe Thunderstorm Watch 115 to include the following areas until 2 
PM EST this afternoon", "In Florida this watch includes 1 county", "In west central Florida", "Pasco", "FLC017-053-", "/E.EXT.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "Severe Thunderstorm Watch 115, previously in effect until noon EST today, is now in effect until 2 PM EST this afternoon for the following areas", "In Florida this watch includes 2 counties", "In west central Florida", "Citrus Hernando", "GMZ870-", "/E.EXT.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "Severe Thunderstorm Watch 115, previously in effect until noon EST today, is now in effect until 2 PM EST this afternoon for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing NEW/CAN TO.A", "name": "WCN_11", "drtTime": "20100101_1700", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 14, "TO.A:0116", ["FLC101","FLC017"]), ("Fcst", "Hazards", "DISCRETE", 14, 19, "TO.A:0116", ["FLC101","FLC017"]), ("Fcst", "Hazards", "DISCRETE", 0, 14, "SV.A:0115", ["GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watches 115/116", "National Weather Service Tampa Bay Ruskin FL", "FLC017-101-", "/E.CAN.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "/E.NEW.KTBW.TO.A.0116.100101T1700Z-100102T0000Z/", "The National Weather Service has issued Tornado Watch 116 until 7 PM EST this evening which replaces a portion of Severe Thunderstorm Watch 115. The new watch is valid for the following areas", "In Florida the new watch includes 2 counties", "In west central Florida", "Citrus Pasco", "FLC053-", "/E.CAN.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "The National Weather Service has cancelled Severe Thunderstorm Watch 115 for the following areas", "In Florida this cancels 1 county", "In west central Florida", "Hernando", "GMZ870-", "/E.CON.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "Severe Thunderstorm Watch 115 remains valid until 2 PM EST this afternoon for the following areas", "This watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM" ], }, { "commentary": "Testing before EXP/EXA TO.A", "name": "WCN_12", "drtTime": "20100101_1850", "decodeVTEC": 0, #don't decode the VTEC this time "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 0, 14, "TO.A:0116", ["FLC101","FLC017"]), ("Fcst", "Hazards", "DISCRETE", 14, 19, "TO.A:0116", ["FLC101","FLC017","GMZ870"]), ("Fcst", "Hazards", "DISCRETE", 0, 14, "SV.A:0115", ["GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watches 115/116", "National Weather Service Tampa Bay Ruskin FL", "GMZ870-", "/E.EXP.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "/E.EXB.KTBW.TO.A.0116.100101T1900Z-100102T0000Z/", "The National Weather Service has issued Tornado Watch 116 until 7 PM EST this evening. Severe Thunderstorm Watch 115 will be allowed to expire. 
The new watch is valid for the following areas", "The new watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM", "FLC017-101-", "/E.CON.KTBW.TO.A.0116.000000T0000Z-100102T0000Z/", "Tornado Watch 116 remains valid until 7 PM EST this evening for the following areas", "In Florida this watch includes 2 counties", "In west central Florida", "Citrus Pasco", ], }, { "commentary": "Testing after EXP/EXA TO.A", "name": "WCN_13", "drtTime": "20100101_1910", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ("Fcst", "Hazards", "DISCRETE", 14, 19, "TO.A:0116", ["FLC101","FLC017","GMZ870"]), ], "checkStrings": ["EXPERIMENTAL...Watch County Notification for Watches 115/116", "National Weather Service Tampa Bay Ruskin FL", "GMZ870-", "/E.EXP.KTBW.SV.A.0115.000000T0000Z-100101T1900Z/", "/E.EXA.KTBW.TO.A.0116.000000T0000Z-100102T0000Z/", "The National Weather Service has issued Tornado Watch 116 until 7 PM EST this evening. Severe Thunderstorm Watch 115 has expired. The new watch is valid for the following areas", "The new watch includes the following adjacent coastal waters", "Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM", "FLC017-101-", "/E.CON.KTBW.TO.A.0116.000000T0000Z-100102T0000Z/", "Tornado Watch 116 remains valid until 7 PM EST this evening for the following areas", "In Florida this watch includes 2 counties", "In west central Florida", "Citrus Pasco", ], }, { "commentary": "Canceling out all hazards.", "name": "WCN_14", "productType": "Hazard_WCN_Local", "createGrids": [ ("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"), ], "checkStrings": [], "decodeVTEC": 0, }, { "commentary": "Deleting hazard grids.", "name": "WCN_15", "productType": "Hazard_WCN_Local", "clearHazardsTable": 1, "checkStrings": [], "decodeVTEC":0, }, ] import TestScript def testScript(self, dataMgr): defaults = { "database": "<site>_GRID__Fcst_00000000_0000", "publishGrids": 0, "decodeVTEC": 1, "gridsStartTime": "20100101_0500", "orderStrings": 1, "vtecMode": "E", "cmdLineVars": "{('Issued By', 'issuedBy'): None}", "deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")], } return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
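Illustrative only: the checkStrings above assert P-VTEC codes such as /E.NEW.KTBW.TO.A.0111.100101T0510Z-100101T0800Z/. This stand-alone sketch (not part of the test framework) shows how those strings decompose; field names follow the NWS VTEC format.

import re

# product class . action . issuing office . phenomena . significance . event number . start-end
VTEC_RE = re.compile(
    r'/(?P<product_class>\w)\.(?P<action>\w+)\.(?P<office>\w{4})\.'
    r'(?P<phenomena>\w{2})\.(?P<significance>\w)\.(?P<etn>\d{4})\.'
    r'(?P<start>[\dT]+Z)-(?P<end>[\dT]+Z)/')

m = VTEC_RE.match('/E.NEW.KTBW.TO.A.0111.100101T0510Z-100101T0800Z/')
print(m.group('action'), m.group('phenomena'), m.group('etn'))  # NEW TO 0111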
48.512644
215
0.545704
2,239
21,103
5.114337
0.127736
0.044014
0.059558
0.047157
0.839665
0.825954
0.812505
0.810672
0.805781
0.796
0
0.116213
0.33128
21,103
434
216
48.624424
0.695224
0.051557
0
0.703704
0
0.031746
0.609815
0.078468
0
0
0
0
0
1
0.002646
false
0
0.002646
0
0.007937
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
56d3f2f0402425455db8ae84e04168f5ca4df21c
191
py
Python
bentoml/pycaret.py
francoisserra/BentoML
213e9e9b39e055286f2649c733907df88e6d2503
[ "Apache-2.0" ]
1
2021-06-12T17:04:07.000Z
2021-06-12T17:04:07.000Z
bentoml/pycaret.py
francoisserra/BentoML
213e9e9b39e055286f2649c733907df88e6d2503
[ "Apache-2.0" ]
4
2021-05-16T08:06:25.000Z
2021-11-13T08:46:36.000Z
bentoml/pycaret.py
francoisserra/BentoML
213e9e9b39e055286f2649c733907df88e6d2503
[ "Apache-2.0" ]
null
null
null
from ._internal.frameworks.pycaret import load from ._internal.frameworks.pycaret import save from ._internal.frameworks.pycaret import load_runner __all__ = ["load", "load_runner", "save"]
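Illustrative only: the three re-exported helpers follow BentoML's usual save/load/runner pattern; the exact signatures live in bentoml._internal.frameworks.pycaret, so the calls below are assumptions sketched from that pattern rather than confirmed API.

import bentoml.pycaret

# tag = bentoml.pycaret.save("pycaret_clf", model)   # hypothetical: persist a trained model
# model = bentoml.pycaret.load(tag)                  # hypothetical: restore it
# runner = bentoml.pycaret.load_runner(tag)          # hypothetical: wrap it for serving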
31.833333
53
0.801047
24
191
6
0.375
0.25
0.458333
0.604167
0.784722
0.541667
0
0
0
0
0
0
0.094241
191
5
54
38.2
0.83237
0
0
0
0
0
0.099476
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
56d8730b6e1f7bac6928b314d33a0b7c76034ef1
6,325
py
Python
tests/server/test_article_pages.py
WGierke/weightlifting_germany_server
2477ce6d7c6a19dfe7b79728ddcb1b6f6ccf5756
[ "Apache-2.0" ]
1
2016-09-08T13:19:32.000Z
2016-09-08T13:19:32.000Z
tests/server/test_article_pages.py
WGierke/weightlifting_germany_server
2477ce6d7c6a19dfe7b79728ddcb1b6f6ccf5756
[ "Apache-2.0" ]
1
2016-09-05T08:42:07.000Z
2017-10-12T08:12:06.000Z
tests/server/test_article_pages.py
WGierke/weightlifting_germany_server
2477ce6d7c6a19dfe7b79728ddcb1b6f6ccf5756
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- from test_main_server import ServerTestCase import json import time import datetime class ArticleTestCase(ServerTestCase): def test_adding_articles(self): url = "http://gewichtheben.blauweiss65-schwedt.de/?page_id=6858&paged=1" date = "1456182000" heading = "My Article Ä" content = "My Content Ä" publisher = "My Publisher" response = self.get_authenticated("/get_articles?publisher=" + publisher) self.assertEqual(response.normal_body, '{"result": []}') params = {"url": url, "date": date, "heading": heading, "content": content, "publisher": publisher} response = self.post_authenticated("/add_article", params=params) self.assertEqual(response.normal_body, 'Added article successfully') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 1) self.assertEqual(result[0]["url"], url) response = self.post_authenticated("/add_article", params=params) self.assertEqual(response.normal_body, 'This article is already saved') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 1) self.assertEqual(result[0]["url"], url) params = {"url": url + "_2", "date": date, "heading": heading, "content": content, "publisher": publisher} response = self.post_authenticated("/add_article", params=params) self.assertEqual(response.normal_body, 'Added article successfully') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 2) self.assertEqual(result[0]["url"], url) self.assertEqual(result[1]["url"], url + "_2") def test_deleting_articles(self): url = "http://gewichtheben.blauweiss65-schwedt.de/?page_id=6858&paged=1" date = "1456182000" heading = "My Article Ä" content = "My Content Ä" publisher = "My Publisher" response = self.get_authenticated("/get_articles?publisher=" + publisher) self.assertEqual(response.normal_body, '{"result": []}') params = {"url": url, "date": date, "heading": heading, "content": content, "publisher": publisher} response = self.post_authenticated("/add_article", params=params) params["url"] = url + "_2" response = self.post_authenticated("/add_article", params=params) response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 2) self.assertEqual(result[0]["url"], url) self.assertEqual(result[1]["url"], url + "_2") response = self.post_authenticated("/delete_article", params={"url": url}) self.assertEqual(response.normal_body, 'Deleted article successfully') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 1) self.assertEqual(result[0]["url"], url + "_2") response = self.post_authenticated("/delete_article", params={"url": url}) self.assertEqual(response.normal_body, 'No article found') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = json.loads(response.normal_body)["result"] self.assertEqual(len(result), 1) self.assertEqual(result[0]["url"], url + "_2") response = self.post_authenticated("/delete_article", params={"url": url + "_2"}) self.assertEqual(response.normal_body, 'Deleted article successfully') response = self.get_authenticated("/get_articles?publisher=" + publisher) result = 
json.loads(response.normal_body)["result"] self.assertEqual(len(result), 0) def test_article_exists(self): response = self.get_authenticated("/get_articles") self.assertEqual(response.normal_body, '{"result": []}') url = "http://gewichtheben.blauweiss65-schwedt.de/?page_id=6858&paged=1" date = "1456182000" heading = "My Article Ä" content = "My Content Ä" publisher = "My Publisher" image = "My Image" params = {"url": url, "date": date, "heading": heading, "content": content, "publisher": publisher, "image": image} response = self.post_authenticated("/article_exists", params=params) self.assertEqual(response.normal_body, "No") response = self.post_authenticated("/add_article", params=params) response = self.post_authenticated("/article_exists", params=params) self.assertEqual(response.normal_body, "Yes") response = self.post_authenticated("/delete_article", params=params) response = self.post_authenticated("/article_exists", params=params) self.assertEqual(response.normal_body, "No") def test_article_getting(self): response = self.get_authenticated("/get_articles") self.assertEqual(response.normal_body, '{"result": []}') url = "http://gewichtheben.blauweiss65-schwedt.de/?page_id=6858&paged=1" date = str(time.mktime(datetime.date(2016, 6, 24).timetuple())) heading = "My Article Ä" content = "My Content Ä" publisher = "My Publisher" image = "My image" params = {"url": url, "date": date, "heading": heading, "content": content, "publisher": publisher, "image": image} response = self.post_authenticated("/add_article", params=params) response = self.get_authenticated("/get_article", params={"url": url}) result = json.loads(response.normal_body)["result"] self.assertEqual(result["url"], url) self.assertEqual(result["date"], date) self.assertEqual(result["heading"], heading.decode("utf-8")) self.assertEqual(result["content"], content.decode("utf-8")) self.assertEqual(result["publisher"], publisher.decode("utf-8")) self.assertEqual(result["image"], image.decode("utf-8"))
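Illustrative only: the HTTP contract these tests exercise, sketched with the `requests` library against a hypothetical base URL; authentication (mirroring get_authenticated/post_authenticated in ServerTestCase) is omitted here and would be required in practice.

import requests

BASE = "http://localhost:8080"  # hypothetical
params = {"url": "http://example.com/a1", "date": "1456182000",
          "heading": "My Article", "content": "My Content", "publisher": "My Publisher"}
requests.post(BASE + "/add_article", data=params)                     # 'Added article successfully'
r = requests.get(BASE + "/get_articles", params={"publisher": "My Publisher"})
print(r.json()["result"])                                             # saved articles for the publisher
requests.post(BASE + "/delete_article", data={"url": params["url"]})  # 'Deleted article successfully'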
49.414063
123
0.657866
701
6,325
5.801712
0.108417
0.1254
0.092943
0.099828
0.896976
0.890337
0.866978
0.856651
0.856651
0.843373
0
0.018043
0.193834
6,325
127
124
49.80315
0.779565
0.006008
0
0.742857
0
0
0.226094
0.034368
0
0
0
0
0.32381
1
0.038095
false
0
0.038095
0
0.085714
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
713c22a447398eb999418088fad79385d9b8c871
114
py
Python
pc4store/config.py
pc4store/pc4store-py
9bdab83ffb47c43da367018ed695d860340ba6a8
[ "MIT" ]
1
2021-04-09T18:37:17.000Z
2021-04-09T18:37:17.000Z
pc4store/config.py
pc4store/pc4store-py
9bdab83ffb47c43da367018ed695d860340ba6a8
[ "MIT" ]
null
null
null
pc4store/config.py
pc4store/pc4store-py
9bdab83ffb47c43da367018ed695d860340ba6a8
[ "MIT" ]
null
null
null
HOST = 'https://api.pc4.store/v1' PUBLIC_KEY = '69f72437e2e359a3e5c29fe9a7e0d509345cc57b7bfca0b470598d679a349806'
38
79
0.842105
9
114
10.555556
1
0
0
0
0
0
0
0
0
0
0
0.416667
0.052632
114
2
80
57
0.462963
0
0
0
0
0
0.77193
0.561404
0
1
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
8
85a51fcff592509f6b4b321bbf1dc2f7a10d7d70
4,864
py
Python
attribution/solver.py
chihkuanyeh/JBShap
60a2c8095e7b3328d877bef57a90480a9a1f6c80
[ "MIT" ]
null
null
null
attribution/solver.py
chihkuanyeh/JBShap
60a2c8095e7b3328d877bef57a90480a9a1f6c80
[ "MIT" ]
null
null
null
attribution/solver.py
chihkuanyeh/JBShap
60a2c8095e7b3328d877bef57a90480a9a1f6c80
[ "MIT" ]
null
null
null
import numpy as np class Solver(): def __init__(self, model, batch_size): self.model = model self.batch_size = batch_size def generate_forward_owen(self, explicand, baseline, players, paths, mask=True, ret_cfs=False, total =False): ret = [] all_cfs = [] shape = explicand.shape batched_shape = list(shape) remaining_batch_size = (len(players) + 1) * len(paths) batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 total_count =0 for i, path in enumerate(paths): cfs[count] = baseline[0] prev = cfs[count] count += 1 total_count+=1 if count == self.batch_size or count == remaining_batch_size: if ret_cfs: all_cfs.append(cfs) ret.append(self.model(cfs).numpy()) remaining_batch_size -= count batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 for j, player in enumerate(players): if mask: cfs[count] = prev * ( 1 - players[paths[i][j]]) + explicand[0] * players[paths[i][j]] else: x = players[paths[i][j]][0] y = players[paths[i][j]][1] cfs[count] = prev cfs[count, x, y, :] = explicand[0, x, y, :] #prev = cfs[count] count += 1 total_count+=1 if count == self.batch_size or count == remaining_batch_size: if ret_cfs: all_cfs.append(cfs) ret.append(self.model(cfs).numpy()) remaining_batch_size -= count batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 if total: if ret_cfs: return np.concatenate(ret), np.concatenate(all_cfs), total_count else: return np.concatenate(ret), total_count if ret_cfs: return np.concatenate(ret), np.concatenate(all_cfs) return np.concatenate(ret) def generate_forward(self, explicand, baseline, players, paths, mask=True, ret_cfs=False, total =False): ret = [] all_cfs = [] shape = explicand.shape batched_shape = list(shape) remaining_batch_size = (len(players) + 1) * len(paths) batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 total_count =0 for i, path in enumerate(paths): cfs[count] = baseline[0] prev = cfs[count] count += 1 total_count+=1 if count == self.batch_size or count == remaining_batch_size: if ret_cfs: all_cfs.append(cfs) ret.append(self.model(cfs).numpy()) remaining_batch_size -= count batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 for j, player in enumerate(players): if mask: cfs[count] = prev * ( 1 - players[paths[i][j]]) + explicand[0] * players[paths[i][j]] else: x = players[paths[i][j]][0] y = players[paths[i][j]][1] cfs[count] = prev cfs[count, x, y, :] = explicand[0, x, y, :] prev = cfs[count] count += 1 total_count+=1 if count == self.batch_size or count == remaining_batch_size: if ret_cfs: all_cfs.append(cfs) ret.append(self.model(cfs).numpy()) remaining_batch_size -= count batched_shape[0] = min(self.batch_size, remaining_batch_size) cfs = np.zeros(batched_shape) count = 0 if total: if ret_cfs: return np.concatenate(ret), np.concatenate(all_cfs), total_count else: return np.concatenate(ret), total_count if ret_cfs: return np.concatenate(ret), np.concatenate(all_cfs) return np.concatenate(ret) def solve(self, scores, paths, average=False): # output 2d array num_paths = len(paths) num_players = len(paths[0]) ret = np.zeros((num_paths, num_players)) order = np.zeros((num_paths,num_players)) for i in range(num_paths): for j in range(num_players): order[i][j] += scores[i*(num_players+1)+j+1] - ( scores[i*(num_players+1)+j]) ret[i][paths[i][j]] = scores[i*(num_players+1)+j+1] - ( scores[i*(num_players+1)+j]) if
average: ret = np.mean(ret, axis=0) return ret, order def solve_owen(self, scores, paths, average=False): # output 2d array num_paths = len(paths) num_players = len(paths[0]) ret = np.zeros((num_paths, num_players)) for i in range(num_paths): for j in range(num_players): ret[i][paths[i][j]] = scores[i*(num_players+1)+j+1] - ( scores[i*(num_players+1)]) if average: ret = np.mean(ret, axis=0) return ret
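Illustrative only: a minimal driver for the Solver above, grounded in the signatures visible in this record. The toy model is a stand-in callable whose output exposes .numpy(), matching how generate_forward invokes self.model; the shapes and masks are invented for the example.

import numpy as np

class _Out:
    # Tiny stand-in for a framework tensor exposing .numpy().
    def __init__(self, arr):
        self._arr = arr
    def numpy(self):
        return self._arr

def toy_model(batch):
    # Score each sample as the sum of its entries.
    return _Out(batch.reshape(len(batch), -1).sum(axis=1))

solver = Solver(toy_model, batch_size=4)
explicand = np.ones((1, 2, 2, 3))
baseline = np.zeros((1, 2, 2, 3))
p0 = np.zeros((2, 2, 3)); p0[0] = 1.0   # mask covering the top row
p1 = np.zeros((2, 2, 3)); p1[1] = 1.0   # mask covering the bottom row
players = [p0, p1]
paths = [[0, 1], [1, 0]]                # two player orderings

scores = solver.generate_forward(explicand, baseline, players, paths)
attr, order = solver.solve(scores, paths, average=True)
print(attr)  # per-player attribution: [6., 6.] for this toy setup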
31.584416
111
0.594367
677
4,864
4.097489
0.094535
0.094088
0.103821
0.040375
0.941601
0.941601
0.941601
0.941601
0.941601
0.941601
0
0.014534
0.278577
4,864
153
112
31.79085
0.776005
0.010074
0
0.885496
1
0
0
0
0
0
0
0
0
1
0.038168
false
0
0.007634
0
0.129771
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
85b4117ae6570bbed70e3729e70ff60e6f9b7874
18,406
py
Python
sdk/python/pulumi_databricks/databricks/pipeline.py
ingenii-solutions/pulumi-databricks
f03ecc4e190a4e59eb635663f6408350dcab42ea
[ "ECL-2.0", "Apache-2.0" ]
2
2021-12-10T07:35:59.000Z
2022-03-23T22:53:55.000Z
sdk/python/pulumi_databricks/databricks/pipeline.py
ingenii-solutions/pulumi-databricks
f03ecc4e190a4e59eb635663f6408350dcab42ea
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_databricks/databricks/pipeline.py
ingenii-solutions/pulumi-databricks
f03ecc4e190a4e59eb635663f6408350dcab42ea
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['PipelineArgs', 'Pipeline'] @pulumi.input_type class PipelineArgs: def __init__(__self__, *, filters: pulumi.Input['PipelineFiltersArgs'], allow_duplicate_names: Optional[pulumi.Input[bool]] = None, clusters: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]] = None, configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None, continuous: Optional[pulumi.Input[bool]] = None, id: Optional[pulumi.Input[str]] = None, libraries: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]] = None, name: Optional[pulumi.Input[str]] = None, storage: Optional[pulumi.Input[str]] = None, target: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Pipeline resource. """ pulumi.set(__self__, "filters", filters) if allow_duplicate_names is not None: pulumi.set(__self__, "allow_duplicate_names", allow_duplicate_names) if clusters is not None: pulumi.set(__self__, "clusters", clusters) if configuration is not None: pulumi.set(__self__, "configuration", configuration) if continuous is not None: pulumi.set(__self__, "continuous", continuous) if id is not None: pulumi.set(__self__, "id", id) if libraries is not None: pulumi.set(__self__, "libraries", libraries) if name is not None: pulumi.set(__self__, "name", name) if storage is not None: pulumi.set(__self__, "storage", storage) if target is not None: pulumi.set(__self__, "target", target) @property @pulumi.getter def filters(self) -> pulumi.Input['PipelineFiltersArgs']: return pulumi.get(self, "filters") @filters.setter def filters(self, value: pulumi.Input['PipelineFiltersArgs']): pulumi.set(self, "filters", value) @property @pulumi.getter(name="allowDuplicateNames") def allow_duplicate_names(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "allow_duplicate_names") @allow_duplicate_names.setter def allow_duplicate_names(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_duplicate_names", value) @property @pulumi.getter def clusters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]]: return pulumi.get(self, "clusters") @clusters.setter def clusters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]]): pulumi.set(self, "clusters", value) @property @pulumi.getter def configuration(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: return pulumi.get(self, "configuration") @configuration.setter def configuration(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "configuration", value) @property @pulumi.getter def continuous(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "continuous") @continuous.setter def continuous(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "continuous", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def libraries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]]: return pulumi.get(self, "libraries") @libraries.setter def libraries(self, value: 
Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]]): pulumi.set(self, "libraries", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def storage(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "storage") @storage.setter def storage(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "storage", value) @property @pulumi.getter def target(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "target") @target.setter def target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target", value) @pulumi.input_type class _PipelineState: def __init__(__self__, *, allow_duplicate_names: Optional[pulumi.Input[bool]] = None, clusters: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]] = None, configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None, continuous: Optional[pulumi.Input[bool]] = None, filters: Optional[pulumi.Input['PipelineFiltersArgs']] = None, id: Optional[pulumi.Input[str]] = None, libraries: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]] = None, name: Optional[pulumi.Input[str]] = None, storage: Optional[pulumi.Input[str]] = None, target: Optional[pulumi.Input[str]] = None, url: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Pipeline resources. """ if allow_duplicate_names is not None: pulumi.set(__self__, "allow_duplicate_names", allow_duplicate_names) if clusters is not None: pulumi.set(__self__, "clusters", clusters) if configuration is not None: pulumi.set(__self__, "configuration", configuration) if continuous is not None: pulumi.set(__self__, "continuous", continuous) if filters is not None: pulumi.set(__self__, "filters", filters) if id is not None: pulumi.set(__self__, "id", id) if libraries is not None: pulumi.set(__self__, "libraries", libraries) if name is not None: pulumi.set(__self__, "name", name) if storage is not None: pulumi.set(__self__, "storage", storage) if target is not None: pulumi.set(__self__, "target", target) if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter(name="allowDuplicateNames") def allow_duplicate_names(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "allow_duplicate_names") @allow_duplicate_names.setter def allow_duplicate_names(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_duplicate_names", value) @property @pulumi.getter def clusters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]]: return pulumi.get(self, "clusters") @clusters.setter def clusters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]]]): pulumi.set(self, "clusters", value) @property @pulumi.getter def configuration(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: return pulumi.get(self, "configuration") @configuration.setter def configuration(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "configuration", value) @property @pulumi.getter def continuous(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "continuous") @continuous.setter def continuous(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "continuous", value) @property @pulumi.getter def filters(self) -> Optional[pulumi.Input['PipelineFiltersArgs']]: return pulumi.get(self, "filters") @filters.setter def filters(self, 
value: Optional[pulumi.Input['PipelineFiltersArgs']]): pulumi.set(self, "filters", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def libraries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]]: return pulumi.get(self, "libraries") @libraries.setter def libraries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineLibraryArgs']]]]): pulumi.set(self, "libraries", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def storage(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "storage") @storage.setter def storage(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "storage", value) @property @pulumi.getter def target(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "target") @target.setter def target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target", value) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) class Pipeline(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, allow_duplicate_names: Optional[pulumi.Input[bool]] = None, clusters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineClusterArgs']]]]] = None, configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None, continuous: Optional[pulumi.Input[bool]] = None, filters: Optional[pulumi.Input[pulumi.InputType['PipelineFiltersArgs']]] = None, id: Optional[pulumi.Input[str]] = None, libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineLibraryArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, storage: Optional[pulumi.Input[str]] = None, target: Optional[pulumi.Input[str]] = None, __props__=None): """ Create a Pipeline resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. """ ... @overload def __init__(__self__, resource_name: str, args: PipelineArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Create a Pipeline resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param PipelineArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(PipelineArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, allow_duplicate_names: Optional[pulumi.Input[bool]] = None, clusters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineClusterArgs']]]]] = None, configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None, continuous: Optional[pulumi.Input[bool]] = None, filters: Optional[pulumi.Input[pulumi.InputType['PipelineFiltersArgs']]] = None, id: Optional[pulumi.Input[str]] = None, libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineLibraryArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, storage: Optional[pulumi.Input[str]] = None, target: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = PipelineArgs.__new__(PipelineArgs) __props__.__dict__["allow_duplicate_names"] = allow_duplicate_names __props__.__dict__["clusters"] = clusters __props__.__dict__["configuration"] = configuration __props__.__dict__["continuous"] = continuous if filters is None and not opts.urn: raise TypeError("Missing required property 'filters'") __props__.__dict__["filters"] = filters __props__.__dict__["id"] = id __props__.__dict__["libraries"] = libraries __props__.__dict__["name"] = name __props__.__dict__["storage"] = storage __props__.__dict__["target"] = target __props__.__dict__["url"] = None super(Pipeline, __self__).__init__( 'databricks:databricks/pipeline:Pipeline', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, allow_duplicate_names: Optional[pulumi.Input[bool]] = None, clusters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineClusterArgs']]]]] = None, configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None, continuous: Optional[pulumi.Input[bool]] = None, filters: Optional[pulumi.Input[pulumi.InputType['PipelineFiltersArgs']]] = None, libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PipelineLibraryArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, storage: Optional[pulumi.Input[str]] = None, target: Optional[pulumi.Input[str]] = None, url: Optional[pulumi.Input[str]] = None) -> 'Pipeline': """ Get an existing Pipeline resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _PipelineState.__new__(_PipelineState) __props__.__dict__["allow_duplicate_names"] = allow_duplicate_names __props__.__dict__["clusters"] = clusters __props__.__dict__["configuration"] = configuration __props__.__dict__["continuous"] = continuous __props__.__dict__["filters"] = filters __props__.__dict__["id"] = id __props__.__dict__["libraries"] = libraries __props__.__dict__["name"] = name __props__.__dict__["storage"] = storage __props__.__dict__["target"] = target __props__.__dict__["url"] = url return Pipeline(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="allowDuplicateNames") def allow_duplicate_names(self) -> pulumi.Output[Optional[bool]]: return pulumi.get(self, "allow_duplicate_names") @property @pulumi.getter def clusters(self) -> pulumi.Output[Optional[Sequence['outputs.PipelineCluster']]]: return pulumi.get(self, "clusters") @property @pulumi.getter def configuration(self) -> pulumi.Output[Optional[Mapping[str, Any]]]: return pulumi.get(self, "configuration") @property @pulumi.getter def continuous(self) -> pulumi.Output[Optional[bool]]: return pulumi.get(self, "continuous") @property @pulumi.getter def filters(self) -> pulumi.Output['outputs.PipelineFilters']: return pulumi.get(self, "filters") @property @pulumi.getter def id(self) -> pulumi.Output[str]: return pulumi.get(self, "id") @property @pulumi.getter def libraries(self) -> pulumi.Output[Optional[Sequence['outputs.PipelineLibrary']]]: return pulumi.get(self, "libraries") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter def storage(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "storage") @property @pulumi.getter def target(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "target") @property @pulumi.getter def url(self) -> pulumi.Output[str]: return pulumi.get(self, "url")
39.926247
134
0.636151
1,981
18,406
5.678445
0.0737
0.112454
0.152014
0.076273
0.839719
0.820251
0.775091
0.76149
0.73251
0.708419
0
0.000072
0.241878
18,406
460
135
40.013043
0.806077
0.060252
0
0.803279
1
0
0.099322
0.017362
0
0
0
0
0
1
0.163934
false
0.002732
0.019126
0.087432
0.281421
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
a40eeeab3b68ba7790a3fa9eb70554c70bb7b19d
2,565
py
Python
speaker_output.py
Sallyke/LIAM
db6fba4e9102b6f18fa63bd3a419b5e3708da021
[ "MIT" ]
11
2021-07-09T13:50:52.000Z
2022-02-16T05:27:53.000Z
speaker_output.py
Sallyke/LIAM
db6fba4e9102b6f18fa63bd3a419b5e3708da021
[ "MIT" ]
3
2021-11-22T02:36:50.000Z
2022-01-28T03:42:14.000Z
speaker_output.py
uoe-agents/LIAM
db6fba4e9102b6f18fa63bd3a419b5e3708da021
[ "MIT" ]
3
2021-07-01T12:29:53.000Z
2022-03-17T16:02:03.000Z
import torch def speaker_output(agent_id, input): values, color = input[0].max(0) color = int(color.data) if agent_id == 1: if color == 0: return torch.Tensor([[1, 0, 0, 0, 0]]) if color == 1: return torch.Tensor([[0, 1, 0, 0, 0]]) if color == 2: return torch.Tensor([[0, 0, 1, 0, 0]]) if agent_id == 2: if color == 0: return torch.Tensor([[0, 1, 0, 0, 0]]) if color == 1: return torch.Tensor([[0, 0, 1, 0, 0]]) if color == 2: return torch.Tensor([[0, 0, 0, 1, 0]]) if agent_id == 3: if color == 0: return torch.Tensor([[0, 1, 0, 0, 0]]) if color == 1: return torch.Tensor([[1, 0, 0, 0, 0]]) if color == 2: return torch.Tensor([[0, 0, 0, 1, 0]]) if agent_id == 4: if color == 0: return torch.Tensor([[0, 0, 0, 1, 0]]) if color == 1: return torch.Tensor([[1, 0, 0, 0, 0]]) if color == 2: return torch.Tensor([[0, 0, 0, 0, 1]]) if agent_id == 5: if color == 0: return torch.Tensor([[1, 0, 0, 0, 0]]) if color == 1: return torch.Tensor([[0, 0, 1, 0, 0]]) if color == 2: return torch.Tensor([[0, 0, 0, 0, 1]]) if agent_id == 6: if color == 0: return torch.Tensor([[0, 0, 0, 0, 1]]) if color == 1: return torch.Tensor([[0, 0, 0, 1, 0]]) if color == 2: return torch.Tensor([[0, 0, 1, 0, 0]]) if agent_id == 7: if color == 0: return torch.Tensor([[0, 0, 0, 1, 0]]) if color == 1: return torch.Tensor([[0, 0, 1, 0, 0]]) if color == 2: return torch.Tensor([[0, 1, 0, 0, 0]]) if agent_id == 8: if color == 0: return torch.Tensor([[0, 0, 1, 0, 0]]) if color == 1: return torch.Tensor([[0, 0, 0, 0, 1]]) if color == 2: return torch.Tensor([[1, 0, 0, 0, 0]]) if agent_id == 9: if color == 0: return torch.Tensor([[0, 0, 0, 1, 0]]) if color == 1: return torch.Tensor([[0, 0, 0, 0, 1]]) if color == 2: return torch.Tensor([[0, 1, 0, 0, 0]]) if agent_id == 10: if color == 0: return torch.Tensor([[0, 0, 1, 0, 0]]) if color == 1: return torch.Tensor([[0, 1, 0, 0, 0]]) if color == 2: return torch.Tensor([[1, 0, 0, 0, 0]])
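Illustrative only: a quick check of the (agent_id, argmax color) -> one-hot mapping implemented above.

import torch

msg = speaker_output(1, torch.Tensor([[0.1, 0.8, 0.1]]))  # argmax color is 1 for agent 1
print(msg)  # tensor([[0., 1., 0., 0., 0.]])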
30.176471
50
0.424951
388
2,565
2.778351
0.069588
0.131725
0.09462
0.400742
0.910019
0.910019
0.903525
0.903525
0.903525
0.893321
0
0.124116
0.393762
2,565
84
51
30.535714
0.569132
0
0
0.810811
0
0
0
0
0
0
0
0
0
1
0.013514
false
0
0.013514
0
0.432432
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
a45552c2e630cfdf9bde43b14905d9ba1f0c3df8
1,822
py
Python
mysql_config/WebMonitoring/generators/website/tests/test_website_generate_samples_queries.py
raresraf/rafMetrics
21eb5e8210364bf70eee746d71c45f3e353dcb10
[ "MIT" ]
15
2019-11-03T18:01:27.000Z
2021-05-05T20:54:57.000Z
mysql_config/WebMonitoring/generators/website/tests/test_website_generate_samples_queries.py
raresraf/rafMetrics
21eb5e8210364bf70eee746d71c45f3e353dcb10
[ "MIT" ]
392
2019-11-09T21:28:01.000Z
2022-03-31T13:04:45.000Z
mysql_config/WebMonitoring/generators/website/tests/test_website_generate_samples_queries.py
raresraf/rafMetrics
21eb5e8210364bf70eee746d71c45f3e353dcb10
[ "MIT" ]
1
2021-03-11T18:35:16.000Z
2021-03-11T18:35:16.000Z
from mysql_config.WebMonitoring.generators.website.tests.website_expected_size import ( EXPECTED_DAILY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE, EXPECTED_WEEKLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE, EXPECTED_MONTHLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE, ) from mysql_config.WebMonitoring.generators.website.tests.website_expected_time import ( EXPECTED_DAILY_WEBSITE_GENERATE_SAMPLES_QUERIES, EXPECTED_WEEKLY_WEBSITE_GENERATE_SAMPLES_QUERIES, EXPECTED_MONTHLY_WEBSITE_GENERATE_SAMPLES_QUERIES, ) from mysql_config.WebMonitoring.generators.website.website_generate_samples_queries import ( website_generate_samples_queries, ) from mysql_config.WebMonitoring.generators.website.website_generate_samples_queries_size import ( website_generate_samples_queries_size, ) def test_website_generate_samples_queries(capfd): website_generate_samples_queries("daily") out, _ = capfd.readouterr() assert out == EXPECTED_DAILY_WEBSITE_GENERATE_SAMPLES_QUERIES website_generate_samples_queries("weekly") out, err = capfd.readouterr() assert out == EXPECTED_WEEKLY_WEBSITE_GENERATE_SAMPLES_QUERIES website_generate_samples_queries("monthly") out, err = capfd.readouterr() assert out == EXPECTED_MONTHLY_WEBSITE_GENERATE_SAMPLES_QUERIES def test_website_generate_samples_queries_size(capfd): website_generate_samples_queries_size("daily") out, _ = capfd.readouterr() assert out == EXPECTED_DAILY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE website_generate_samples_queries_size("weekly") out, err = capfd.readouterr() assert out == EXPECTED_WEEKLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE website_generate_samples_queries_size("monthly") out, err = capfd.readouterr() assert out == EXPECTED_MONTHLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE
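Illustrative only: the pytest capfd pattern these tests rely on, reduced to its essentials.

def emit():
    print("hello")

def test_emit(capfd):
    emit()
    out, _ = capfd.readouterr()  # capture everything written to stdout/stderr so far
    assert out == "hello\n"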
42.372093
97
0.83315
214
1,822
6.53271
0.11215
0.257511
0.377682
0.497854
0.994278
0.904864
0.811874
0.669528
0.62804
0.53505
0
0
0.108672
1,822
42
98
43.380952
0.860837
0
0
0.176471
1
0
0.019759
0
0
0
0
0
0.176471
1
0.058824
false
0
0.117647
0
0.176471
0
0
0
0
null
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f10e314284bac198a51a793d0961c97b7aff9175
81
py
Python
src/sensationdriver/platform.py
sebastianludwig/SensationDriver
8787afa8fb55a43af69d83192a4f09a4279b5c00
[ "MIT" ]
null
null
null
src/sensationdriver/platform.py
sebastianludwig/SensationDriver
8787afa8fb55a43af69d83192a4f09a4279b5c00
[ "MIT" ]
null
null
null
src/sensationdriver/platform.py
sebastianludwig/SensationDriver
8787afa8fb55a43af69d83192a4f09a4279b5c00
[ "MIT" ]
null
null
null
import os def is_raspberry(): return os.popen('uname').read() == 'Linux\n'
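Illustrative only: `platform.system()` performs the same check without spawning a shell; note that, like the original, this detects Linux generally rather than Raspberry Pi hardware specifically.

import platform

def is_raspberry():
    return platform.system() == 'Linux'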
13.5
48
0.62963
12
81
4.166667
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.17284
81
5
49
16.2
0.746269
0
0
0
0
0
0.148148
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
7
f11c60853cfc979266d8020d350309a1addc6d06
7,978
py
Python
tests/test_routing.py
gocept/alphaflow
4b797cb12fb52254b1884159fd9a8b899c739f7c
[ "ZPL-2.1", "ZPL-2.0" ]
null
null
null
tests/test_routing.py
gocept/alphaflow
4b797cb12fb52254b1884159fd9a8b899c739f7c
[ "ZPL-2.1", "ZPL-2.0" ]
null
null
null
tests/test_routing.py
gocept/alphaflow
4b797cb12fb52254b1884159fd9a8b899c739f7c
[ "ZPL-2.1", "ZPL-2.0" ]
1
2021-11-01T07:58:18.000Z
2021-11-01T07:58:18.000Z
# Copyright (c) 2004-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$

import unittest

from Products.Archetypes.tests.utils import *
from Products.Archetypes.tests.common import *
from Products.CMFCore.utils import getToolByName

from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.activities.routing import RouteWorkItem, RouteActivity
from Products.AlphaFlow.activities.interfaces import \
    IRouteWorkItem, IRouteActivity, ILifeCycleController


class RouteTest(AlphaFlowTestCase):

    interfaces_to_test = [
        (IRouteWorkItem, RouteWorkItem),
        (IRouteActivity, RouteActivity)]

    def test_definition(self):
        # Creates a simple workflow
        portal = self.portal
        self._create_test_users()
        self.loginAsPortalOwner()
        self._import_wf('workflows/routing_example.alf')
        wftool = getToolByName(portal, 'workflow_manager')

        # Create object for instantiation of this process
        portal.createObject("testdocument", "DummyContent")

        # Initialize the process
        doc = portal.testdocument
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")

        # Do the same process but reject this time
        self.loginAsPortalOwner()
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].reject()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")

    def test_routing_with_decision(self):
        portal = self.portal
        self._create_test_users()
        self.loginAsPortalOwner()
        self._import_wf('workflows/routing_simpledec.alf')
        wftool = getToolByName(portal, 'workflow_manager')

        # Create object for instantiation of this process
        portal.createObject("testdocument", "DummyContent")

        # Initialize the process
        doc = portal.testdocument
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("editor1")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor3")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor2")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(ILifeCycleController(wi).state, "failed")

        # Do the same process but reject this time
        self.loginAsPortalOwner()
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].reject()
        self.assertEquals(controller.state, "active")

        self.login("editor1")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor2")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor3")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")

        # Do the same process but editor2 rejects this time
        self.loginAsPortalOwner()
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor1")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "active")

        self.login("editor2")
        doc.getWorkItemsForCurrentUser()[0].reject()
        self.assertEquals(controller.state, "active")

        self.login("editor3")
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")

    def test_delayed_discriminator(self):
        portal = self.portal
        self._create_test_users()
        self.loginAsPortalOwner()
        self._import_wf('workflows/routing_delayed_discriminator.alf')
        wftool = getToolByName(portal, 'workflow_manager')

        # Create object for instantiation of this process
        portal.createObject("testdocument", "DummyContent")

        # Initialize the process
        doc = portal.testdocument
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")

        # Do the same process but reject this time
        self.loginAsPortalOwner()
        doc.assignProcess(self.test_process)
        process = doc.getInstance()
        controller = ILifeCycleController(process)
        controller.start("testing")
        self.assertEquals(controller.state, "active")

        self.login("author")
        doc.getWorkItemsForCurrentUser()[0].reject()
        self.assertEquals(controller.state, "active")
        doc.getWorkItemsForCurrentUser()[0].accept()
        doc.getWorkItemsForCurrentUser()[0].accept()
        self.assertEquals(controller.state, "ended")
        self.assertEquals(controller.completed, True)
        wis = process.getWorkItems(state=None)
        for wi in wis:
            self.failIfEqual(wi.state, "failed")


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(RouteTest))
    return suite


if __name__ == '__main__':
    framework()
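These tests need a full Plone/AlphaFlow sandbox to run. As a framework-free illustration of the life cycle they assert (a started process is "active", accepting work items drives it to "ended", and a reject re-routes the flow rather than leaving any work item "failed"), here is a minimal sketch; Process and its methods are hypothetical stand-ins, not the AlphaFlow API.

# Minimal, framework-free sketch of the routing life cycle asserted in
# the tests above. "Process" and its methods are hypothetical stand-ins,
# not the AlphaFlow API.

class Process:
    def __init__(self, steps):
        self.steps = list(steps)   # ordered work-item names
        self.index = 0
        self.state = "inactive"
        self.failed = []           # routing never marks items "failed"

    def start(self, comment):
        self.state = "active"

    def accept(self):
        self.index += 1
        if self.index >= len(self.steps):
            self.state = "ended"

    def reject(self):
        # A reject routes back to the previous step; nothing fails.
        self.index = max(self.index - 1, 0)


if __name__ == "__main__":
    p = Process(["author", "editor1", "editor2"])
    p.start("testing")
    assert p.state == "active"
    p.accept()
    p.reject()                     # routed back, still active
    assert p.state == "active"
    p.accept(); p.accept(); p.accept()
    assert p.state == "ended"
    assert not p.failed
    print("sketch: process ended without failed work items")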
33.103734
78
0.664076
749
7978
7.012016
0.149533
0.091394
0.148515
0.14166
0.854532
0.854532
0.854532
0.854532
0.853389
0.852437
0
0.006656
0.227877
7978
240
79
33.241667
0.845942
0.060416
0
0.843373
0
0
0.075401
0.01377
0
0
0
0
0.180723
1
0.024096
false
0
0.060241
0
0.10241
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
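The numeric columns above are the record's qsc_* quality signals. Their exact definitions live in the extraction pipeline, but a few are simple enough to recompute from the content field; the sketch below assumes plain whitespace tokenization, so its outputs will only approximate the recorded values.

# Recomputing simple quality signals from a record's content field.
# The whitespace tokenization is an assumption about the pipeline, so
# these only approximate the recorded qsc_* values.

def mean_word_length(content: str) -> float:
    words = content.split()
    return sum(len(w) for w in words) / len(words) if words else 0.0

def frac_words_unique(content: str) -> float:
    words = content.split()
    return len(set(words)) / len(words) if words else 0.0

def frac_chars_whitespace(content: str) -> float:
    return sum(c.isspace() for c in content) / len(content) if content else 0.0

sample = "from .create_directory import create_directory\n"
print(mean_word_length(sample), frac_words_unique(sample), frac_chars_whitespace(sample))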
f125d54d0dfd7f15f9d8615b707760f434cfde1b
98
py
Python
_helper_functions/__init__.py
oxquantum/CVAE
0352ddc51fbfd8d57b155e6de66b4c34e010beac
[ "MIT" ]
null
null
null
_helper_functions/__init__.py
oxquantum/CVAE
0352ddc51fbfd8d57b155e6de66b4c34e010beac
[ "MIT" ]
null
null
null
_helper_functions/__init__.py
oxquantum/CVAE
0352ddc51fbfd8d57b155e6de66b4c34e010beac
[ "MIT" ]
null
null
null
from .create_directory import create_directory
from .dict_to_namedtuple import dict_to_namedtuple
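The record only captures the package's re-exports; the helper modules themselves are not included. As an illustration of what such helpers conventionally look like, the bodies below are guesses, not the oxquantum/CVAE implementation.

# Sketches of conventional implementations for the two re-exported
# helpers. These bodies are illustrative guesses, not the actual
# oxquantum/CVAE code.
import os
from collections import namedtuple

def create_directory(path):
    # Conventional "mkdir -p" helper.
    os.makedirs(path, exist_ok=True)
    return path

def dict_to_namedtuple(d, typename="Config"):
    # Recursively convert nested dicts so attribute access works all
    # the way down, e.g. cfg.model.layers.
    fields = {k: dict_to_namedtuple(v, typename=k) if isinstance(v, dict) else v
              for k, v in d.items()}
    return namedtuple(typename, fields.keys())(**fields)

cfg = dict_to_namedtuple({"lr": 1e-3, "model": {"layers": 4}})
print(cfg.lr, cfg.model.layers)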
32.666667
50
0.897959
14
98
5.857143
0.5
0.365854
0.390244
0
0
0
0
0
0
0
0
0
0.081633
98
2
51
49
0.911111
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
f175581f8a9bfa645aea08945f0cc1f1f2a1a653
145101
py
Python
pyboto3/inspector.py
gehad-shaat/pyboto3
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
[ "MIT" ]
91
2016-12-31T11:38:37.000Z
2021-09-16T19:33:23.000Z
pyboto3/inspector.py
gehad-shaat/pyboto3
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
[ "MIT" ]
7
2017-01-02T18:54:23.000Z
2020-08-11T13:54:02.000Z
pyboto3/inspector.py
gehad-shaat/pyboto3
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
[ "MIT" ]
26
2016-12-31T13:11:00.000Z
2022-03-03T21:01:12.000Z
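The repo fields above repeat three times because the dump carries one view of the file per popularity metric (stars, issues, forks); only the counts and event dates differ. For readability, here is the same metadata reassembled into a single record, with every value copied from the fields above.

# The pyboto3/inspector.py record's metadata, reassembled from the
# repeated stars/issues/forks views above. Values are copied verbatim.
record = {
    "hexsha": "f175581f8a9bfa645aea08945f0cc1f1f2a1a653",
    "size": 145101,
    "ext": "py",
    "lang": "Python",
    "path": "pyboto3/inspector.py",
    "repo": "gehad-shaat/pyboto3",
    "head_hexsha": "4a0c2851a8bc04fb1c71c36086f7bb257e48181d",
    "licenses": ["MIT"],
    "stars": 91,
    "issues": 7,
    "forks": 26,
}
print(record["repo"], record["stars"], record["forks"])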
''' The MIT License (MIT) Copyright (c) 2016 WavyCloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' def add_attributes_to_findings(findingArns=None, attributes=None): """ Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings. See also: AWS API Documentation Exceptions Examples Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings. Expected Output: :example: response = client.add_attributes_to_findings( findingArns=[ 'string', ], attributes=[ { 'key': 'string', 'value': 'string' }, ] ) :type findingArns: list :param findingArns: [REQUIRED]\nThe ARNs that specify the findings that you want to assign attributes to.\n\n(string) --\n\n :type attributes: list :param attributes: [REQUIRED]\nThe array of attributes that you want to assign to specified findings.\n\n(dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions.\n\nkey (string) -- [REQUIRED]The attribute key.\n\nvalue (string) --The value assigned to the attribute key.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax { 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- failedItems (dict) -- Attribute details that cannot be described. An error code is provided for each failed item. (string) -- (dict) -- Includes details about the failed items. failureCode (string) -- The status code of a failed item. retryable (boolean) -- Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException Examples Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings. 
response = client.add_attributes_to_findings( attributes=[ { 'key': 'Example', 'value': 'example', }, ], findingArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-8l1VIE0D/run/0-Z02cjjug/finding/0-T8yM9mEU', ], ) print(response) Expected Output: { 'failedItems': { }, 'ResponseMetadata': { '...': '...', }, } :return: { 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException """ pass def can_paginate(operation_name=None): """ Check if an operation can be paginated. :type operation_name: string :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo'). """ pass def create_assessment_target(assessmentTargetName=None, resourceGroupArn=None): """ Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup . If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target. If the service-linked role isn\xe2\x80\x99t already registered, this action also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets . See also: AWS API Documentation Exceptions Examples Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. Expected Output: :example: response = client.create_assessment_target( assessmentTargetName='string', resourceGroupArn='string' ) :type assessmentTargetName: string :param assessmentTargetName: [REQUIRED]\nThe user-defined name that identifies the assessment target that you want to create. The name must be unique within the AWS account.\n :type resourceGroupArn: string :param resourceGroupArn: The ARN that specifies the resource group that is used to create the assessment target. If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target. :rtype: dict ReturnsResponse Syntax { 'assessmentTargetArn': 'string' } Response Structure (dict) -- assessmentTargetArn (string) -- The ARN that specifies the assessment target that is created. 
Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.LimitExceededException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.InvalidCrossAccountRoleException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException Examples Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. response = client.create_assessment_target( assessmentTargetName='ExampleAssessmentTarget', resourceGroupArn='arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-AB6DMKnv', ) print(response) Expected Output: { 'assessmentTargetArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTargetArn': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.LimitExceededException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.InvalidCrossAccountRoleException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException """ pass def create_assessment_template(assessmentTargetArn=None, assessmentTemplateName=None, durationInSeconds=None, rulesPackageArns=None, userAttributesForFindings=None): """ Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. If the service-linked role isn\xe2\x80\x99t already registered, this action also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments. See also: AWS API Documentation Exceptions Examples Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. Expected Output: :example: response = client.create_assessment_template( assessmentTargetArn='string', assessmentTemplateName='string', durationInSeconds=123, rulesPackageArns=[ 'string', ], userAttributesForFindings=[ { 'key': 'string', 'value': 'string' }, ] ) :type assessmentTargetArn: string :param assessmentTargetArn: [REQUIRED]\nThe ARN that specifies the assessment target for which you want to create the assessment template.\n :type assessmentTemplateName: string :param assessmentTemplateName: [REQUIRED]\nThe user-defined name that identifies the assessment template that you want to create. You can create several assessment templates for an assessment target. The names of the assessment templates that correspond to a particular assessment target must be unique.\n :type durationInSeconds: integer :param durationInSeconds: [REQUIRED]\nThe duration of the assessment run in seconds.\n :type rulesPackageArns: list :param rulesPackageArns: [REQUIRED]\nThe ARNs that specify the rules packages that you want to attach to the assessment template.\n\n(string) --\n\n :type userAttributesForFindings: list :param userAttributesForFindings: The user-defined attributes that are assigned to every finding that is generated by the assessment run that uses this assessment template. An attribute is a key and value pair (an Attribute object). 
Within an assessment template, each key must be unique.\n\n(dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions.\n\nkey (string) -- [REQUIRED]The attribute key.\n\nvalue (string) --The value assigned to the attribute key.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax { 'assessmentTemplateArn': 'string' } Response Structure (dict) -- assessmentTemplateArn (string) -- The ARN that specifies the assessment template that is created. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.LimitExceededException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException Examples Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. response = client.create_assessment_template( assessmentTargetArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX', assessmentTemplateName='ExampleAssessmentTemplate', durationInSeconds=180, rulesPackageArns=[ 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-11B9DBXp', ], userAttributesForFindings=[ { 'key': 'Example', 'value': 'example', }, ], ) print(response) Expected Output: { 'assessmentTemplateArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTemplateArn': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.LimitExceededException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException """ pass def create_exclusions_preview(assessmentTemplateArn=None): """ Starts the generation of an exclusions preview for the specified assessment template. The exclusions preview lists the potential exclusions (ExclusionPreview) that Inspector can detect before it runs the assessment. See also: AWS API Documentation Exceptions :example: response = client.create_exclusions_preview( assessmentTemplateArn='string' ) :type assessmentTemplateArn: string :param assessmentTemplateArn: [REQUIRED]\nThe ARN that specifies the assessment template for which you want to create an exclusions preview.\n :rtype: dict ReturnsResponse Syntax{ 'previewToken': 'string' } Response Structure (dict) -- previewToken (string) --Specifies the unique identifier of the requested exclusions preview. You can use the unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview API. Exceptions Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.PreviewGenerationInProgressException Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException :return: { 'previewToken': 'string' } """ pass def create_resource_group(resourceGroupTags=None): """ Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. For more information, see CreateAssessmentTarget . 
See also: AWS API Documentation Exceptions Examples Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. Expected Output: :example: response = client.create_resource_group( resourceGroupTags=[ { 'key': 'string', 'value': 'string' }, ] ) :type resourceGroupTags: list :param resourceGroupTags: [REQUIRED]\nA collection of keys and an array of possible values, \'[{'key':'key1','values':['Value1','Value2']},{'key':'Key2','values':['Value3']}]\'.\nFor example,\'[{'key':'Name','values':['TestEC2Instance']}]\'.\n\n(dict) --This data type is used as one of the elements of the ResourceGroup data type.\n\nkey (string) -- [REQUIRED]A tag key.\n\nvalue (string) --The value assigned to a tag key.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax{ 'resourceGroupArn': 'string' } Response Structure (dict) -- resourceGroupArn (string) --The ARN that specifies the resource group that is created. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.LimitExceededException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException Examples Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. response = client.create_resource_group( resourceGroupTags=[ { 'key': 'Name', 'value': 'example', }, ], ) print(response) Expected Output: { 'resourceGroupArn': 'arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-AB6DMKnv', 'ResponseMetadata': { '...': '...', }, } :return: { 'resourceGroupArn': 'string' } """ pass def delete_assessment_run(assessmentRunArn=None): """ Deletes the assessment run that is specified by the ARN of the assessment run. See also: AWS API Documentation Exceptions Examples Deletes the assessment run that is specified by the ARN of the assessment run. Expected Output: :example: response = client.delete_assessment_run( assessmentRunArn='string' ) :type assessmentRunArn: string :param assessmentRunArn: [REQUIRED]\nThe ARN that specifies the assessment run that you want to delete.\n :return: response = client.delete_assessment_run( assessmentRunArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T/run/0-11LMTAVe', ) print(response) """ pass def delete_assessment_target(assessmentTargetArn=None): """ Deletes the assessment target that is specified by the ARN of the assessment target. See also: AWS API Documentation Exceptions Examples Deletes the assessment target that is specified by the ARN of the assessment target. Expected Output: :example: response = client.delete_assessment_target( assessmentTargetArn='string' ) :type assessmentTargetArn: string :param assessmentTargetArn: [REQUIRED]\nThe ARN that specifies the assessment target that you want to delete.\n :return: response = client.delete_assessment_target( assessmentTargetArn='arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', ) print(response) """ pass def delete_assessment_template(assessmentTemplateArn=None): """ Deletes the assessment template that is specified by the ARN of the assessment template. 
See also: AWS API Documentation Exceptions Examples Deletes the assessment template that is specified by the ARN of the assessment template. Expected Output: :example: response = client.delete_assessment_template( assessmentTemplateArn='string' ) :type assessmentTemplateArn: string :param assessmentTemplateArn: [REQUIRED]\nThe ARN that specifies the assessment template that you want to delete.\n :return: response = client.delete_assessment_template( assessmentTemplateArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T', ) print(response) """ pass def describe_assessment_runs(assessmentRunArns=None): """ Describes the assessment runs that are specified by the ARNs of the assessment runs. See also: AWS API Documentation Exceptions Examples Describes the assessment runs that are specified by the ARNs of the assessment runs. Expected Output: :example: response = client.describe_assessment_runs( assessmentRunArns=[ 'string', ] ) :type assessmentRunArns: list :param assessmentRunArns: [REQUIRED]\nThe ARN that specifies the assessment run that you want to describe.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'assessmentRuns': [ { 'arn': 'string', 'name': 'string', 'assessmentTemplateArn': 'string', 'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'CANCELED', 'durationInSeconds': 123, 'rulesPackageArns': [ 'string', ], 'userAttributesForFindings': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'startedAt': datetime(2015, 1, 1), 'completedAt': datetime(2015, 1, 1), 'stateChangedAt': datetime(2015, 1, 1), 'dataCollected': True|False, 'stateChanges': [ { 'stateChangedAt': datetime(2015, 1, 1), 'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'CANCELED' }, ], 'notifications': [ { 'date': datetime(2015, 1, 1), 'event': 'ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER', 'message': 'string', 'error': True|False, 'snsTopicArn': 'string', 'snsPublishStatusCode': 'SUCCESS'|'TOPIC_DOES_NOT_EXIST'|'ACCESS_DENIED'|'INTERNAL_ERROR' }, ], 'findingCounts': { 'string': 123 } }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- assessmentRuns (list) --Information about the assessment run. (dict) --A snapshot of an Amazon Inspector assessment run that contains the findings of the assessment run . Used as the response element in the DescribeAssessmentRuns action. arn (string) --The ARN of the assessment run. name (string) --The auto-generated name for the assessment run. assessmentTemplateArn (string) --The ARN of the assessment template that is associated with the assessment run. state (string) --The state of the assessment run. durationInSeconds (integer) --The duration of the assessment run. rulesPackageArns (list) --The rules packages selected for the assessment run. (string) -- userAttributesForFindings (list) --The user-defined attributes that are assigned to every generated finding. 
(dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) --The attribute key. value (string) --The value assigned to the attribute key. createdAt (datetime) --The time when StartAssessmentRun was called. startedAt (datetime) --The time when StartAssessmentRun was called. completedAt (datetime) --The assessment run completion time that corresponds to the rules packages evaluation completion time or failure. stateChangedAt (datetime) --The last time when the assessment run\'s state changed. dataCollected (boolean) --A Boolean value (true or false) that specifies whether the process of collecting data from the agents is completed. stateChanges (list) --A list of the assessment run state changes. (dict) --Used as one of the elements of the AssessmentRun data type. stateChangedAt (datetime) --The last time the assessment run state changed. state (string) --The assessment run state. notifications (list) --A list of notifications for the event subscriptions. A notification about a particular generated finding is added to this list only once. (dict) --Used as one of the elements of the AssessmentRun data type. date (datetime) --The date of the notification. event (string) --The event for which a notification is sent. message (string) --The message included in the notification. error (boolean) --The Boolean value that specifies whether the notification represents an error. snsTopicArn (string) --The SNS topic to which the SNS notification is sent. snsPublishStatusCode (string) --The status code of the SNS notification. findingCounts (dict) --Provides a total count of generated findings per severity. (string) -- (integer) -- failedItems (dict) --Assessment run details that cannot be described. An error code is provided for each failed item. (string) -- (dict) --Includes details about the failed items. failureCode (string) --The status code of a failed item. retryable (boolean) --Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the assessment runs that are specified by the ARNs of the assessment runs. 
response = client.describe_assessment_runs( assessmentRunArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', ], ) print(response) Expected Output: { 'assessmentRuns': [ { 'name': 'Run 1 for ExampleAssessmentTemplate', 'arn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', 'assessmentTemplateArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw', 'completedAt': datetime(2016, 3, 22, 20, 58, 21, 1, 82, 0), 'createdAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), 'dataCollected': True, 'durationInSeconds': 3600, 'findingCounts': { 'High': 14, 'Informational': 0, 'Low': 0, 'Medium': 2, 'Undefined': 0, }, 'notifications': [ ], 'rulesPackageArns': [ 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-X1KXtawP', ], 'startedAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), 'state': 'COMPLETED', 'stateChangedAt': datetime(2016, 3, 22, 20, 58, 21, 1, 82, 0), 'stateChanges': [ { 'state': 'CREATED', 'stateChangedAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), }, { 'state': 'START_DATA_COLLECTION_PENDING', 'stateChangedAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), }, { 'state': 'START_DATA_COLLECTION_IN_PROGRESS', 'stateChangedAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), }, { 'state': 'COLLECTING_DATA', 'stateChangedAt': datetime(2016, 3, 22, 20, 56, 10, 1, 82, 0), }, { 'state': 'STOP_DATA_COLLECTION_PENDING', 'stateChangedAt': datetime(2016, 3, 22, 20, 57, 19, 1, 82, 0), }, { 'state': 'DATA_COLLECTED', 'stateChangedAt': datetime(2016, 3, 22, 20, 58, 19, 1, 82, 0), }, { 'state': 'EVALUATING_RULES', 'stateChangedAt': datetime(2016, 3, 22, 20, 58, 20, 1, 82, 0), }, { 'state': 'COMPLETED', 'stateChangedAt': datetime(2016, 3, 22, 20, 58, 21, 1, 82, 0), }, ], 'userAttributesForFindings': [ ], }, ], 'failedItems': { }, 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentRuns': [ { 'arn': 'string', 'name': 'string', 'assessmentTemplateArn': 'string', 'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'CANCELED', 'durationInSeconds': 123, 'rulesPackageArns': [ 'string', ], 'userAttributesForFindings': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'startedAt': datetime(2015, 1, 1), 'completedAt': datetime(2015, 1, 1), 'stateChangedAt': datetime(2015, 1, 1), 'dataCollected': True|False, 'stateChanges': [ { 'stateChangedAt': datetime(2015, 1, 1), 'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'CANCELED' }, ], 'notifications': [ { 'date': datetime(2015, 1, 1), 'event': 'ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER', 'message': 'string', 'error': True|False, 'snsTopicArn': 'string', 'snsPublishStatusCode': 'SUCCESS'|'TOPIC_DOES_NOT_EXIST'|'ACCESS_DENIED'|'INTERNAL_ERROR' }, ], 'findingCounts': { 'string': 123 } }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: (string) -- """ pass def 
describe_assessment_targets(assessmentTargetArns=None): """ Describes the assessment targets that are specified by the ARNs of the assessment targets. See also: AWS API Documentation Exceptions Examples Describes the assessment targets that are specified by the ARNs of the assessment targets. Expected Output: :example: response = client.describe_assessment_targets( assessmentTargetArns=[ 'string', ] ) :type assessmentTargetArns: list :param assessmentTargetArns: [REQUIRED]\nThe ARNs that specifies the assessment targets that you want to describe.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'assessmentTargets': [ { 'arn': 'string', 'name': 'string', 'resourceGroupArn': 'string', 'createdAt': datetime(2015, 1, 1), 'updatedAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- assessmentTargets (list) --Information about the assessment targets. (dict) --Contains information about an Amazon Inspector application. This data type is used as the response element in the DescribeAssessmentTargets action. arn (string) --The ARN that specifies the Amazon Inspector assessment target. name (string) --The name of the Amazon Inspector assessment target. resourceGroupArn (string) --The ARN that specifies the resource group that is associated with the assessment target. createdAt (datetime) --The time at which the assessment target is created. updatedAt (datetime) --The time at which UpdateAssessmentTarget is called. failedItems (dict) --Assessment target details that cannot be described. An error code is provided for each failed item. (string) -- (dict) --Includes details about the failed items. failureCode (string) --The status code of a failed item. retryable (boolean) --Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the assessment targets that are specified by the ARNs of the assessment targets. response = client.describe_assessment_targets( assessmentTargetArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', ], ) print(response) Expected Output: { 'assessmentTargets': [ { 'name': 'ExampleAssessmentTarget', 'arn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', 'createdAt': datetime(2016, 3, 15, 20, 36, 31, 1, 75, 0), 'resourceGroupArn': 'arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-PyGXopAI', 'updatedAt': datetime(2016, 3, 15, 20, 36, 31, 1, 75, 0), }, ], 'failedItems': { }, 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTargets': [ { 'arn': 'string', 'name': 'string', 'resourceGroupArn': 'string', 'createdAt': datetime(2015, 1, 1), 'updatedAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException """ pass def describe_assessment_templates(assessmentTemplateArns=None): """ Describes the assessment templates that are specified by the ARNs of the assessment templates. See also: AWS API Documentation Exceptions Examples Describes the assessment templates that are specified by the ARNs of the assessment templates. 
Expected Output: :example: response = client.describe_assessment_templates( assessmentTemplateArns=[ 'string', ] ) :type assessmentTemplateArns: list :param assessmentTemplateArns: [REQUIRED]\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'assessmentTemplates': [ { 'arn': 'string', 'name': 'string', 'assessmentTargetArn': 'string', 'durationInSeconds': 123, 'rulesPackageArns': [ 'string', ], 'userAttributesForFindings': [ { 'key': 'string', 'value': 'string' }, ], 'lastAssessmentRunArn': 'string', 'assessmentRunCount': 123, 'createdAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- assessmentTemplates (list) --Information about the assessment templates. (dict) --Contains information about an Amazon Inspector assessment template. This data type is used as the response element in the DescribeAssessmentTemplates action. arn (string) --The ARN of the assessment template. name (string) --The name of the assessment template. assessmentTargetArn (string) --The ARN of the assessment target that corresponds to this assessment template. durationInSeconds (integer) --The duration in seconds specified for this assessment template. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day). rulesPackageArns (list) --The rules packages that are specified for this assessment template. (string) -- userAttributesForFindings (list) --The user-defined attributes that are assigned to every generated finding from the assessment run that uses this assessment template. (dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) --The attribute key. value (string) --The value assigned to the attribute key. lastAssessmentRunArn (string) --The Amazon Resource Name (ARN) of the most recent assessment run associated with this assessment template. This value exists only when the value of assessmentRunCount is greaterpa than zero. assessmentRunCount (integer) --The number of existing assessment runs associated with this assessment template. This value can be zero or a positive integer. createdAt (datetime) --The time at which the assessment template is created. failedItems (dict) --Assessment template details that cannot be described. An error code is provided for each failed item. (string) -- (dict) --Includes details about the failed items. failureCode (string) --The status code of a failed item. retryable (boolean) --Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the assessment templates that are specified by the ARNs of the assessment templates. 
response = client.describe_assessment_templates( assessmentTemplateArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw', ], ) print(response) Expected Output: { 'assessmentTemplates': [ { 'name': 'ExampleAssessmentTemplate', 'arn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw', 'assessmentRunCount': 0, 'assessmentTargetArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', 'createdAt': datetime(2016, 3, 15, 20, 36, 31, 1, 75, 0), 'durationInSeconds': 3600, 'rulesPackageArns': [ 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-X1KXtawP', ], 'userAttributesForFindings': [ ], }, ], 'failedItems': { }, 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTemplates': [ { 'arn': 'string', 'name': 'string', 'assessmentTargetArn': 'string', 'durationInSeconds': 123, 'rulesPackageArns': [ 'string', ], 'userAttributesForFindings': [ { 'key': 'string', 'value': 'string' }, ], 'lastAssessmentRunArn': 'string', 'assessmentRunCount': 123, 'createdAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: (string) -- """ pass def describe_cross_account_access_role(): """ Describes the IAM role that enables Amazon Inspector to access your AWS account. See also: AWS API Documentation Exceptions Examples Describes the IAM role that enables Amazon Inspector to access your AWS account. Expected Output: :example: response = client.describe_cross_account_access_role() :rtype: dict ReturnsResponse Syntax{ 'roleArn': 'string', 'valid': True|False, 'registeredAt': datetime(2015, 1, 1) } Response Structure (dict) -- roleArn (string) --The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS account. valid (boolean) --A Boolean value that specifies whether the IAM role has the necessary policies attached to enable Amazon Inspector to access your AWS account. registeredAt (datetime) --The date when the cross-account access role was registered. Exceptions Inspector.Client.exceptions.InternalException Examples Describes the IAM role that enables Amazon Inspector to access your AWS account. response = client.describe_cross_account_access_role( ) print(response) Expected Output: { 'registeredAt': datetime(2016, 3, 15, 19, 13, 2, 1, 75, 0), 'roleArn': 'arn:aws:iam::123456789012:role/inspector', 'valid': True, 'ResponseMetadata': { '...': '...', }, } :return: { 'roleArn': 'string', 'valid': True|False, 'registeredAt': datetime(2015, 1, 1) } """ pass def describe_exclusions(exclusionArns=None, locale=None): """ Describes the exclusions that are specified by the exclusions\' ARNs. See also: AWS API Documentation Exceptions :example: response = client.describe_exclusions( exclusionArns=[ 'string', ], locale='EN_US' ) :type exclusionArns: list :param exclusionArns: [REQUIRED]\nThe list of ARNs that specify the exclusions that you want to describe.\n\n(string) --\n\n :type locale: string :param locale: The locale into which you want to translate the exclusion\'s title, description, and recommendation. 
:rtype: dict ReturnsResponse Syntax { 'exclusions': { 'string': { 'arn': 'string', 'title': 'string', 'description': 'string', 'recommendation': 'string', 'scopes': [ { 'key': 'INSTANCE_ID'|'RULES_PACKAGE_ARN', 'value': 'string' }, ], 'attributes': [ { 'key': 'string', 'value': 'string' }, ] } }, 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- exclusions (dict) -- Information about the exclusions. (string) -- (dict) -- Contains information about what was excluded from an assessment run. arn (string) -- The ARN that specifies the exclusion. title (string) -- The name of the exclusion. description (string) -- The description of the exclusion. recommendation (string) -- The recommendation for the exclusion. scopes (list) -- The AWS resources for which the exclusion pertains. (dict) -- This data type contains key-value pairs that identify various Amazon resources. key (string) -- The type of the scope. value (string) -- The resource identifier for the specified scope type. attributes (list) -- The system-defined attributes for the exclusion. (dict) -- This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) -- The attribute key. value (string) -- The value assigned to the attribute key. failedItems (dict) -- Exclusion details that cannot be described. An error code is provided for each failed item. (string) -- (dict) -- Includes details about the failed items. failureCode (string) -- The status code of a failed item. retryable (boolean) -- Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException :return: { 'exclusions': { 'string': { 'arn': 'string', 'title': 'string', 'description': 'string', 'recommendation': 'string', 'scopes': [ { 'key': 'INSTANCE_ID'|'RULES_PACKAGE_ARN', 'value': 'string' }, ], 'attributes': [ { 'key': 'string', 'value': 'string' }, ] } }, 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException """ pass def describe_findings(findingArns=None, locale=None): """ Describes the findings that are specified by the ARNs of the findings. See also: AWS API Documentation Exceptions Examples Describes the findings that are specified by the ARNs of the findings. Expected Output: :example: response = client.describe_findings( findingArns=[ 'string', ], locale='EN_US' ) :type findingArns: list :param findingArns: [REQUIRED]\nThe ARN that specifies the finding that you want to describe.\n\n(string) --\n\n :type locale: string :param locale: The locale into which you want to translate a finding description, recommendation, and the short description that identifies the finding. 
:rtype: dict ReturnsResponse Syntax { 'findings': [ { 'arn': 'string', 'schemaVersion': 123, 'service': 'string', 'serviceAttributes': { 'schemaVersion': 123, 'assessmentRunArn': 'string', 'rulesPackageArn': 'string' }, 'assetType': 'ec2-instance', 'assetAttributes': { 'schemaVersion': 123, 'agentId': 'string', 'autoScalingGroup': 'string', 'amiId': 'string', 'hostname': 'string', 'ipv4Addresses': [ 'string', ], 'tags': [ { 'key': 'string', 'value': 'string' }, ], 'networkInterfaces': [ { 'networkInterfaceId': 'string', 'subnetId': 'string', 'vpcId': 'string', 'privateDnsName': 'string', 'privateIpAddress': 'string', 'privateIpAddresses': [ { 'privateDnsName': 'string', 'privateIpAddress': 'string' }, ], 'publicDnsName': 'string', 'publicIp': 'string', 'ipv6Addresses': [ 'string', ], 'securityGroups': [ { 'groupName': 'string', 'groupId': 'string' }, ] }, ] }, 'id': 'string', 'title': 'string', 'description': 'string', 'recommendation': 'string', 'severity': 'Low'|'Medium'|'High'|'Informational'|'Undefined', 'numericSeverity': 123.0, 'confidence': 123, 'indicatorOfCompromise': True|False, 'attributes': [ { 'key': 'string', 'value': 'string' }, ], 'userAttributes': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'updatedAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- findings (list) -- Information about the finding. (dict) -- Contains information about an Amazon Inspector finding. This data type is used as the response element in the DescribeFindings action. arn (string) -- The ARN that specifies the finding. schemaVersion (integer) -- The schema version of this data type. service (string) -- The data element is set to "Inspector". serviceAttributes (dict) -- This data type is used in the Finding data type. schemaVersion (integer) -- The schema version of this data type. assessmentRunArn (string) -- The ARN of the assessment run during which the finding is generated. rulesPackageArn (string) -- The ARN of the rules package that is used to generate the finding. assetType (string) -- The type of the host from which the finding is generated. assetAttributes (dict) -- A collection of attributes of the host from which the finding is generated. schemaVersion (integer) -- The schema version of this data type. agentId (string) -- The ID of the agent that is installed on the EC2 instance where the finding is generated. autoScalingGroup (string) -- The Auto Scaling group of the EC2 instance where the finding is generated. amiId (string) -- The ID of the Amazon Machine Image (AMI) that is installed on the EC2 instance where the finding is generated. hostname (string) -- The hostname of the EC2 instance where the finding is generated. ipv4Addresses (list) -- The list of IP v4 addresses of the EC2 instance where the finding is generated. (string) -- tags (list) -- The tags related to the EC2 instance where the finding is generated. (dict) -- A key and value pair. This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action. key (string) -- A tag key. value (string) -- A value assigned to a tag key. networkInterfaces (list) -- An array of the network interfaces interacting with the EC2 instance where the finding is generated. 
(dict) -- Contains information about the network interfaces interacting with an EC2 instance. This data type is used as one of the elements of the AssetAttributes data type. networkInterfaceId (string) -- The ID of the network interface. subnetId (string) -- The ID of a subnet associated with the network interface. vpcId (string) -- The ID of a VPC associated with the network interface. privateDnsName (string) -- The name of a private DNS associated with the network interface. privateIpAddress (string) -- The private IP address associated with the network interface. privateIpAddresses (list) -- A list of the private IP addresses associated with the network interface. Includes the privateDnsName and privateIpAddress. (dict) -- Contains information about a private IP address associated with a network interface. This data type is used as a response element in the DescribeFindings action. privateDnsName (string) -- The DNS name of the private IP address. privateIpAddress (string) -- The full IP address of the network inteface. publicDnsName (string) -- The name of a public DNS associated with the network interface. publicIp (string) -- The public IP address from which the network interface is reachable. ipv6Addresses (list) -- The IP addresses associated with the network interface. (string) -- securityGroups (list) -- A list of the security groups associated with the network interface. Includes the groupId and groupName. (dict) -- Contains information about a security group associated with a network interface. This data type is used as one of the elements of the NetworkInterface data type. groupName (string) -- The name of the security group. groupId (string) -- The ID of the security group. id (string) -- The ID of the finding. title (string) -- The name of the finding. description (string) -- The description of the finding. recommendation (string) -- The recommendation for the finding. severity (string) -- The finding severity. Values can be set to High, Medium, Low, and Informational. numericSeverity (float) -- The numeric value of the finding severity. confidence (integer) -- This data element is currently not used. indicatorOfCompromise (boolean) -- This data element is currently not used. attributes (list) -- The system-defined attributes for the finding. (dict) -- This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) -- The attribute key. value (string) -- The value assigned to the attribute key. userAttributes (list) -- The user-defined attributes that are assigned to the finding. (dict) -- This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) -- The attribute key. value (string) -- The value assigned to the attribute key. createdAt (datetime) -- The time when the finding was generated. updatedAt (datetime) -- The time when AddAttributesToFindings is called. failedItems (dict) -- Finding details that cannot be described. An error code is provided for each failed item. (string) -- (dict) -- Includes details about the failed items. failureCode (string) -- The status code of a failed item. retryable (boolean) -- Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the findings that are specified by the ARNs of the findings. 
response = client.describe_findings( findingArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE/finding/0-HwPnsDm4', ], ) print(response) Expected Output: { 'failedItems': { }, 'findings': [ { 'arn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE/finding/0-HwPnsDm4', 'assetAttributes': { 'ipv4Addresses': [ ], 'schemaVersion': 1, }, 'assetType': 'ec2-instance', 'attributes': [ ], 'confidence': 10, 'createdAt': datetime(2016, 3, 22, 20, 58, 21, 1, 82, 0), 'description': 'Amazon Inspector did not find any potential security issues during this assessment.', 'indicatorOfCompromise': False, 'numericSeverity': 0, 'recommendation': 'No remediation needed.', 'schemaVersion': 1, 'service': 'Inspector', 'serviceAttributes': { 'assessmentRunArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', 'rulesPackageArn': 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-X1KXtawP', 'schemaVersion': 1, }, 'severity': 'Informational', 'title': 'No potential security issues found', 'updatedAt': datetime(2016, 3, 22, 20, 58, 21, 1, 82, 0), 'userAttributes': [ ], }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'findings': [ { 'arn': 'string', 'schemaVersion': 123, 'service': 'string', 'serviceAttributes': { 'schemaVersion': 123, 'assessmentRunArn': 'string', 'rulesPackageArn': 'string' }, 'assetType': 'ec2-instance', 'assetAttributes': { 'schemaVersion': 123, 'agentId': 'string', 'autoScalingGroup': 'string', 'amiId': 'string', 'hostname': 'string', 'ipv4Addresses': [ 'string', ], 'tags': [ { 'key': 'string', 'value': 'string' }, ], 'networkInterfaces': [ { 'networkInterfaceId': 'string', 'subnetId': 'string', 'vpcId': 'string', 'privateDnsName': 'string', 'privateIpAddress': 'string', 'privateIpAddresses': [ { 'privateDnsName': 'string', 'privateIpAddress': 'string' }, ], 'publicDnsName': 'string', 'publicIp': 'string', 'ipv6Addresses': [ 'string', ], 'securityGroups': [ { 'groupName': 'string', 'groupId': 'string' }, ] }, ] }, 'id': 'string', 'title': 'string', 'description': 'string', 'recommendation': 'string', 'severity': 'Low'|'Medium'|'High'|'Informational'|'Undefined', 'numericSeverity': 123.0, 'confidence': 123, 'indicatorOfCompromise': True|False, 'attributes': [ { 'key': 'string', 'value': 'string' }, ], 'userAttributes': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'updatedAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: (string) -- """ pass def describe_resource_groups(resourceGroupArns=None): """ Describes the resource groups that are specified by the ARNs of the resource groups. See also: AWS API Documentation Exceptions Examples Describes the resource groups that are specified by the ARNs of the resource groups. 
Expected Output: :example: response = client.describe_resource_groups( resourceGroupArns=[ 'string', ] ) :type resourceGroupArns: list :param resourceGroupArns: [REQUIRED]\nThe ARN that specifies the resource group that you want to describe.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'resourceGroups': [ { 'arn': 'string', 'tags': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- resourceGroups (list) --Information about a resource group. (dict) --Contains information about a resource group. The resource group defines a set of tags that, when queried, identify the AWS resources that make up the assessment target. This data type is used as the response element in the DescribeResourceGroups action. arn (string) --The ARN of the resource group. tags (list) --The tags (key and value pairs) of the resource group. This data type property is used in the CreateResourceGroup action. (dict) --This data type is used as one of the elements of the ResourceGroup data type. key (string) --A tag key. value (string) --The value assigned to a tag key. createdAt (datetime) --The time at which resource group is created. failedItems (dict) --Resource group details that cannot be described. An error code is provided for each failed item. (string) -- (dict) --Includes details about the failed items. failureCode (string) --The status code of a failed item. retryable (boolean) --Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the resource groups that are specified by the ARNs of the resource groups. response = client.describe_resource_groups( resourceGroupArns=[ 'arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-PyGXopAI', ], ) print(response) Expected Output: { 'failedItems': { }, 'resourceGroups': [ { 'arn': 'arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-PyGXopAI', 'createdAt': datetime(2016, 3, 15, 20, 36, 31, 1, 75, 0), 'tags': [ { 'key': 'Name', 'value': 'example', }, ], }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'resourceGroups': [ { 'arn': 'string', 'tags': [ { 'key': 'string', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1) }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException """ pass def describe_rules_packages(rulesPackageArns=None, locale=None): """ Describes the rules packages that are specified by the ARNs of the rules packages. See also: AWS API Documentation Exceptions Examples Describes the rules packages that are specified by the ARNs of the rules packages. Expected Output: :example: response = client.describe_rules_packages( rulesPackageArns=[ 'string', ], locale='EN_US' ) :type rulesPackageArns: list :param rulesPackageArns: [REQUIRED]\nThe ARN that specifies the rules package that you want to describe.\n\n(string) --\n\n :type locale: string :param locale: The locale that you want to translate a rules package description into. 
:rtype: dict ReturnsResponse Syntax { 'rulesPackages': [ { 'arn': 'string', 'name': 'string', 'version': 'string', 'provider': 'string', 'description': 'string' }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } Response Structure (dict) -- rulesPackages (list) -- Information about the rules package. (dict) -- Contains information about an Amazon Inspector rules package. This data type is used as the response element in the DescribeRulesPackages action. arn (string) -- The ARN of the rules package. name (string) -- The name of the rules package. version (string) -- The version ID of the rules package. provider (string) -- The provider of the rules package. description (string) -- The description of the rules package. failedItems (dict) -- Rules package details that cannot be described. An error code is provided for each failed item. (string) -- (dict) -- Includes details about the failed items. failureCode (string) -- The status code of a failed item. retryable (boolean) -- Indicates whether you can immediately retry a request for this item for a specified resource. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Examples Describes the rules packages that are specified by the ARNs of the rules packages. response = client.describe_rules_packages( rulesPackageArns=[ 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ', ], ) print(response) Expected Output: { 'failedItems': { }, 'rulesPackages': [ { 'version': '1.1', 'name': 'Security Best Practices', 'arn': 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ', 'description': 'The rules in this package help determine whether your systems are configured securely.', 'provider': 'Amazon Web Services, Inc.', }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'rulesPackages': [ { 'arn': 'string', 'name': 'string', 'version': 'string', 'provider': 'string', 'description': 'string' }, ], 'failedItems': { 'string': { 'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'retryable': True|False } } } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException """ pass def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to\nClientMethod. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model. """ pass def get_assessment_report(assessmentRunArn=None, reportFileFormat=None, reportType=None): """ Produces an assessment report that includes detailed and comprehensive results of a specified assessment run. 
See also: AWS API Documentation Exceptions :example: response = client.get_assessment_report( assessmentRunArn='string', reportFileFormat='HTML'|'PDF', reportType='FINDING'|'FULL' ) :type assessmentRunArn: string :param assessmentRunArn: [REQUIRED]\nThe ARN that specifies the assessment run for which you want to generate a report.\n :type reportFileFormat: string :param reportFileFormat: [REQUIRED]\nSpecifies the file format (html or pdf) of the assessment report that you want to generate.\n :type reportType: string :param reportType: [REQUIRED]\nSpecifies the type of the assessment report that you want to generate. There are two types of assessment reports: a finding report and a full report. For more information, see Assessment Reports .\n :rtype: dict ReturnsResponse Syntax { 'status': 'WORK_IN_PROGRESS'|'FAILED'|'COMPLETED', 'url': 'string' } Response Structure (dict) -- status (string) -- Specifies the status of the request to generate an assessment report. url (string) -- Specifies the URL where you can find the generated assessment report. This parameter is only returned if the report is successfully generated. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.AssessmentRunInProgressException Inspector.Client.exceptions.UnsupportedFeatureException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException :return: { 'status': 'WORK_IN_PROGRESS'|'FAILED'|'COMPLETED', 'url': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.AssessmentRunInProgressException Inspector.Client.exceptions.UnsupportedFeatureException Inspector.Client.exceptions.ServiceTemporarilyUnavailableException """ pass def get_exclusions_preview(assessmentTemplateArn=None, previewToken=None, nextToken=None, maxResults=None, locale=None): """ Retrieves the exclusions preview (a list of ExclusionPreview objects) specified by the preview token. You can obtain the preview token by running the CreateExclusionsPreview API. See also: AWS API Documentation Exceptions :example: response = client.get_exclusions_preview( assessmentTemplateArn='string', previewToken='string', nextToken='string', maxResults=123, locale='EN_US' ) :type assessmentTemplateArn: string :param assessmentTemplateArn: [REQUIRED]\nThe ARN that specifies the assessment template for which the exclusions preview was requested.\n :type previewToken: string :param previewToken: [REQUIRED]\nThe unique identifier associated with the exclusions preview.\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the GetExclusionsPreviewRequest action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 100. The maximum value is 500. :type locale: string :param locale: The locale into which you want to translate the exclusion\'s title, description, and recommendation.
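Because the preview is generated asynchronously (see previewStatus below) and the results are paginated, callers typically obtain the token from CreateExclusionsPreview and then loop on nextToken. A sketch under those assumptions (the template ARN is illustrative, and client is an Inspector client created with boto3.client('inspector')):

template_arn = 'arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0'  # illustrative
preview_token = client.create_exclusions_preview(
    assessmentTemplateArn=template_arn,
)['previewToken']

previews = []
next_token = None
while True:
    kwargs = {
        'assessmentTemplateArn': template_arn,
        'previewToken': preview_token,
        'maxResults': 100,
    }
    if next_token:
        kwargs['nextToken'] = next_token
    page = client.get_exclusions_preview(**kwargs)
    # While previewStatus is WORK_IN_PROGRESS the preview is still being built
    # and exclusionPreviews may be incomplete.
    previews.extend(page['exclusionPreviews'])
    next_token = page.get('nextToken')
    if not next_token:
        break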
:rtype: dict ReturnsResponse Syntax { 'previewStatus': 'WORK_IN_PROGRESS'|'COMPLETED', 'exclusionPreviews': [ { 'title': 'string', 'description': 'string', 'recommendation': 'string', 'scopes': [ { 'key': 'INSTANCE_ID'|'RULES_PACKAGE_ARN', 'value': 'string' }, ], 'attributes': [ { 'key': 'string', 'value': 'string' }, ] }, ], 'nextToken': 'string' } Response Structure (dict) -- previewStatus (string) -- Specifies the status of the request to generate an exclusions preview. exclusionPreviews (list) -- Information about the exclusions included in the preview. (dict) -- Contains information about what is excluded from an assessment run given the current state of the assessment template. title (string) -- The name of the exclusion preview. description (string) -- The description of the exclusion preview. recommendation (string) -- The recommendation for the exclusion preview. scopes (list) -- The AWS resources to which the exclusion preview pertains. (dict) -- This data type contains key-value pairs that identify various Amazon resources. key (string) -- The type of the scope. value (string) -- The resource identifier for the specified scope type. attributes (list) -- The system-defined attributes for the exclusion preview. (dict) -- This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions. key (string) -- The attribute key. value (string) -- The value assigned to the attribute key. nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException :return: { 'previewStatus': 'WORK_IN_PROGRESS'|'COMPLETED', 'exclusionPreviews': [ { 'title': 'string', 'description': 'string', 'recommendation': 'string', 'scopes': [ { 'key': 'INSTANCE_ID'|'RULES_PACKAGE_ARN', 'value': 'string' }, ], 'attributes': [ { 'key': 'string', 'value': 'string' }, ] }, ], 'nextToken': 'string' } :returns: Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException """ pass def get_paginator(operation_name=None): """ Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo'). :rtype: L{botocore.paginate.Paginator} ReturnsA paginator object. """ pass def get_telemetry_metadata(assessmentRunArn=None): """ Information about the data that is collected for the specified assessment run. See also: AWS API Documentation Exceptions Examples Information about the data that is collected for the specified assessment run.
Expected Output: :example: response = client.get_telemetry_metadata( assessmentRunArn='string' ) :type assessmentRunArn: string :param assessmentRunArn: [REQUIRED]\nThe ARN that specifies the assessment run that has the telemetry data that you want to obtain.\n :rtype: dict ReturnsResponse Syntax{ 'telemetryMetadata': [ { 'messageType': 'string', 'count': 123, 'dataSize': 123 }, ] } Response Structure (dict) -- telemetryMetadata (list) --Telemetry details. (dict) --The metadata about the Amazon Inspector application data metrics collected by the agent. This data type is used as the response element in the GetTelemetryMetadata action. messageType (string) --A specific type of behavioral data that is collected by the agent. count (integer) --The count of messages that the agent sends to the Amazon Inspector service. dataSize (integer) --The data size of messages that the agent sends to the Amazon Inspector service. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Information about the data that is collected for the specified assessment run. response = client.get_telemetry_metadata( assessmentRunArn='arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', ) print(response) Expected Output: { 'telemetryMetadata': [ { 'count': 2, 'dataSize': 345, 'messageType': 'InspectorDuplicateProcess', }, { 'count': 3, 'dataSize': 255, 'messageType': 'InspectorTimeEventMsg', }, { 'count': 4, 'dataSize': 1082, 'messageType': 'InspectorNetworkInterface', }, { 'count': 2, 'dataSize': 349, 'messageType': 'InspectorDnsEntry', }, { 'count': 11, 'dataSize': 2514, 'messageType': 'InspectorDirectoryInfoMsg', }, { 'count': 1, 'dataSize': 179, 'messageType': 'InspectorTcpV6ListeningPort', }, { 'count': 101, 'dataSize': 10949, 'messageType': 'InspectorTerminal', }, { 'count': 26, 'dataSize': 5916, 'messageType': 'InspectorUser', }, { 'count': 282, 'dataSize': 32148, 'messageType': 'InspectorDynamicallyLoadedCodeModule', }, { 'count': 18, 'dataSize': 10172, 'messageType': 'InspectorCreateProcess', }, { 'count': 3, 'dataSize': 8001, 'messageType': 'InspectorProcessPerformance', }, { 'count': 1, 'dataSize': 360, 'messageType': 'InspectorOperatingSystem', }, { 'count': 6, 'dataSize': 546, 'messageType': 'InspectorStopProcess', }, { 'count': 1, 'dataSize': 1553, 'messageType': 'InspectorInstanceMetaData', }, { 'count': 2, 'dataSize': 434, 'messageType': 'InspectorTcpV4Connection', }, { 'count': 474, 'dataSize': 2960322, 'messageType': 'InspectorPackageInfo', }, { 'count': 3, 'dataSize': 2235, 'messageType': 'InspectorSystemPerformance', }, { 'count': 105, 'dataSize': 46048, 'messageType': 'InspectorCodeModule', }, { 'count': 1, 'dataSize': 182, 'messageType': 'InspectorUdpV6ListeningPort', }, { 'count': 2, 'dataSize': 371, 'messageType': 'InspectorUdpV4ListeningPort', }, { 'count': 18, 'dataSize': 8362, 'messageType': 'InspectorKernelModule', }, { 'count': 29, 'dataSize': 48788, 'messageType': 'InspectorConfigurationInfo', }, { 'count': 1, 'dataSize': 79, 'messageType': 'InspectorMonitoringStart', }, { 'count': 5, 'dataSize': 0, 'messageType': 'InspectorSplitMsgBegin', }, { 'count': 51, 'dataSize': 4593, 'messageType': 'InspectorGroup', }, { 'count': 1, 'dataSize': 184, 'messageType': 'InspectorTcpV4ListeningPort', }, { 'count': 1159, 'dataSize': 3146579, 'messageType': 'Total', }, { 'count': 5, 'dataSize': 0, 
'messageType': 'InspectorSplitMsgEnd', }, { 'count': 1, 'dataSize': 612, 'messageType': 'InspectorLoadImageInProcess', }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'telemetryMetadata': [ { 'messageType': 'string', 'count': 123, 'dataSize': 123 }, ] } """ pass def get_waiter(waiter_name=None): """ Returns an object that can wait for some condition. :type waiter_name: str :param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters. :rtype: botocore.waiter.Waiter """ pass def list_assessment_run_agents(assessmentRunArn=None, filter=None, nextToken=None, maxResults=None): """ Lists the agents of the assessment runs that are specified by the ARNs of the assessment runs. See also: AWS API Documentation Exceptions Examples Lists the agents of the assessment runs that are specified by the ARNs of the assessment runs. Expected Output: :example: response = client.list_assessment_run_agents( assessmentRunArn='string', filter={ 'agentHealths': [ 'HEALTHY'|'UNHEALTHY'|'UNKNOWN', ], 'agentHealthCodes': [ 'IDLE'|'RUNNING'|'SHUTDOWN'|'UNHEALTHY'|'THROTTLED'|'UNKNOWN', ] }, nextToken='string', maxResults=123 ) :type assessmentRunArn: string :param assessmentRunArn: [REQUIRED]\nThe ARN that specifies the assessment run whose agents you want to list.\n :type filter: dict :param filter: You can use this parameter to specify a subset of data to be included in the action\'s response.\nFor a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.\n\nagentHealths (list) -- [REQUIRED]The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY .\n\n(string) --\n\n\nagentHealthCodes (list) -- [REQUIRED]The detailed health state of the agent. Values can be set to IDLE , RUNNING , SHUTDOWN , UNHEALTHY , THROTTLED , and UNKNOWN .\n\n(string) --\n\n\n\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentRunAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'assessmentRunAgents': [ { 'agentId': 'string', 'assessmentRunArn': 'string', 'agentHealth': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN', 'agentHealthCode': 'IDLE'|'RUNNING'|'SHUTDOWN'|'UNHEALTHY'|'THROTTLED'|'UNKNOWN', 'agentHealthDetails': 'string', 'autoScalingGroup': 'string', 'telemetryMetadata': [ { 'messageType': 'string', 'count': 123, 'dataSize': 123 }, ] }, ], 'nextToken': 'string' } Response Structure (dict) -- assessmentRunAgents (list) -- A list of ARNs that specifies the agents returned by the action. (dict) -- Contains information about an Amazon Inspector agent. This data type is used as a response element in the ListAssessmentRunAgents action. agentId (string) -- The ID of the EC2 instance where the agent is installed. assessmentRunArn (string) -- The ARN of the assessment run that is associated with the agent. agentHealth (string) -- The current health state of the agent. agentHealthCode (string) -- The detailed health state of the agent.
agentHealthDetails (string) -- The description for the agent health code. autoScalingGroup (string) -- The Auto Scaling group of the EC2 instance that is specified by the agent ID. telemetryMetadata (list) -- The Amazon Inspector application data metrics that are collected by the agent. (dict) -- The metadata about the Amazon Inspector application data metrics collected by the agent. This data type is used as the response element in the GetTelemetryMetadata action. messageType (string) -- A specific type of behavioral data that is collected by the agent. count (integer) -- The count of messages that the agent sends to the Amazon Inspector service. dataSize (integer) -- The data size of messages that the agent sends to the Amazon Inspector service. nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists the agents of the assessment runs that are specified by the ARNs of the assessment runs. response = client.list_assessment_run_agents( assessmentRunArn='arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', maxResults=123, ) print(response) Expected Output: { 'assessmentRunAgents': [ { 'agentHealth': 'HEALTHY', 'agentHealthCode': 'RUNNING', 'agentId': 'i-49113b93', 'assessmentRunArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', 'telemetryMetadata': [ { 'count': 2, 'dataSize': 345, 'messageType': 'InspectorDuplicateProcess', }, { 'count': 3, 'dataSize': 255, 'messageType': 'InspectorTimeEventMsg', }, { 'count': 4, 'dataSize': 1082, 'messageType': 'InspectorNetworkInterface', }, { 'count': 2, 'dataSize': 349, 'messageType': 'InspectorDnsEntry', }, { 'count': 11, 'dataSize': 2514, 'messageType': 'InspectorDirectoryInfoMsg', }, { 'count': 1, 'dataSize': 179, 'messageType': 'InspectorTcpV6ListeningPort', }, { 'count': 101, 'dataSize': 10949, 'messageType': 'InspectorTerminal', }, { 'count': 26, 'dataSize': 5916, 'messageType': 'InspectorUser', }, { 'count': 282, 'dataSize': 32148, 'messageType': 'InspectorDynamicallyLoadedCodeModule', }, { 'count': 18, 'dataSize': 10172, 'messageType': 'InspectorCreateProcess', }, { 'count': 3, 'dataSize': 8001, 'messageType': 'InspectorProcessPerformance', }, { 'count': 1, 'dataSize': 360, 'messageType': 'InspectorOperatingSystem', }, { 'count': 6, 'dataSize': 546, 'messageType': 'InspectorStopProcess', }, { 'count': 1, 'dataSize': 1553, 'messageType': 'InspectorInstanceMetaData', }, { 'count': 2, 'dataSize': 434, 'messageType': 'InspectorTcpV4Connection', }, { 'count': 474, 'dataSize': 2960322, 'messageType': 'InspectorPackageInfo', }, { 'count': 3, 'dataSize': 2235, 'messageType': 'InspectorSystemPerformance', }, { 'count': 105, 'dataSize': 46048, 'messageType': 'InspectorCodeModule', }, { 'count': 1, 'dataSize': 182, 'messageType': 'InspectorUdpV6ListeningPort', }, { 'count': 2, 'dataSize': 371, 'messageType': 'InspectorUdpV4ListeningPort', }, { 'count': 18, 'dataSize': 8362, 'messageType': 'InspectorKernelModule', }, { 'count': 29, 'dataSize': 48788, 'messageType': 'InspectorConfigurationInfo', }, { 'count': 1, 
'dataSize': 79, 'messageType': 'InspectorMonitoringStart', }, { 'count': 5, 'dataSize': 0, 'messageType': 'InspectorSplitMsgBegin', }, { 'count': 51, 'dataSize': 4593, 'messageType': 'InspectorGroup', }, { 'count': 1, 'dataSize': 184, 'messageType': 'InspectorTcpV4ListeningPort', }, { 'count': 1159, 'dataSize': 3146579, 'messageType': 'Total', }, { 'count': 5, 'dataSize': 0, 'messageType': 'InspectorSplitMsgEnd', }, { 'count': 1, 'dataSize': 612, 'messageType': 'InspectorLoadImageInProcess', }, ], }, ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentRunAgents': [ { 'agentId': 'string', 'assessmentRunArn': 'string', 'agentHealth': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN', 'agentHealthCode': 'IDLE'|'RUNNING'|'SHUTDOWN'|'UNHEALTHY'|'THROTTLED'|'UNKNOWN', 'agentHealthDetails': 'string', 'autoScalingGroup': 'string', 'telemetryMetadata': [ { 'messageType': 'string', 'count': 123, 'dataSize': 123 }, ] }, ], 'nextToken': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException """ pass def list_assessment_runs(assessmentTemplateArns=None, filter=None, nextToken=None, maxResults=None): """ Lists the assessment runs that correspond to the assessment templates that are specified by the ARNs of the assessment templates. See also: AWS API Documentation Exceptions Examples Lists the assessment runs that correspond to the assessment templates that are specified by the ARNs of the assessment templates. Expected Output: :example: response = client.list_assessment_runs( assessmentTemplateArns=[ 'string', ], filter={ 'namePattern': 'string', 'states': [ 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'CANCELED', ], 'durationRange': { 'minSeconds': 123, 'maxSeconds': 123 }, 'rulesPackageArns': [ 'string', ], 'startTimeRange': { 'beginDate': datetime(2015, 1, 1), 'endDate': datetime(2015, 1, 1) }, 'completionTimeRange': { 'beginDate': datetime(2015, 1, 1), 'endDate': datetime(2015, 1, 1) }, 'stateChangeTimeRange': { 'beginDate': datetime(2015, 1, 1), 'endDate': datetime(2015, 1, 1) } }, nextToken='string', maxResults=123 ) :type assessmentTemplateArns: list :param assessmentTemplateArns: The ARNs that specify the assessment templates whose assessment runs you want to list.\n\n(string) --\n\n :type filter: dict :param filter: You can use this parameter to specify a subset of data to be included in the action\'s response.\nFor a record to match a filter, all specified filter attributes must match. 
When multiple values are specified for a filter attribute, any of the values can match.\n\nnamePattern (string) --For a record to match a filter, an explicit value or a string containing a wildcard that is specified for this data type property must match the value of the assessmentRunName property of the AssessmentRun data type.\n\nstates (list) --For a record to match a filter, one of the values specified for this data type property must be the exact match of the value of the assessmentRunState property of the AssessmentRun data type.\n\n(string) --\n\n\ndurationRange (dict) --For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the AssessmentRun data type.\n\nminSeconds (integer) --The minimum value of the duration range. Must be greater than zero.\n\nmaxSeconds (integer) --The maximum value of the duration range. Must be less than or equal to 604800 seconds (1 week).\n\n\n\nrulesPackageArns (list) --For a record to match a filter, the value that is specified for this data type property must be contained in the list of values of the rulesPackages property of the AssessmentRun data type.\n\n(string) --\n\n\nstartTimeRange (dict) --For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the startTime property of the AssessmentRun data type.\n\nbeginDate (datetime) --The minimum value of the timestamp range.\n\nendDate (datetime) --The maximum value of the timestamp range.\n\n\n\ncompletionTimeRange (dict) --For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the completedAt property of the AssessmentRun data type.\n\nbeginDate (datetime) --The minimum value of the timestamp range.\n\nendDate (datetime) --The maximum value of the timestamp range.\n\n\n\nstateChangeTimeRange (dict) --For a record to match a filter, the value that is specified for this data type property must match the stateChangedAt property of the AssessmentRun data type.\n\nbeginDate (datetime) --The minimum value of the timestamp range.\n\nendDate (datetime) --The maximum value of the timestamp range.\n\n\n\n\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentRuns action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'assessmentRunArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- assessmentRunArns (list) -- A list of ARNs that specifies the assessment runs that are returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. 
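For instance, a minimal sketch of the nextToken loop described above, collecting every completed run for one template (the template ARN is the illustrative one from the example that follows, and client is an Inspector client created with boto3.client('inspector')):

run_arns = []
next_token = None
while True:
    kwargs = {
        'assessmentTemplateArns': [
            'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw',  # illustrative
        ],
        'filter': {'states': ['COMPLETED']},  # keep only runs that finished normally
        'maxResults': 500,
    }
    if next_token:
        kwargs['nextToken'] = next_token
    page = client.list_assessment_runs(**kwargs)
    run_arns.extend(page['assessmentRunArns'])
    next_token = page.get('nextToken')
    if not next_token:
        break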
Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists the assessment runs that correspond to the assessment templates that are specified by the ARNs of the assessment templates. response = client.list_assessment_runs( assessmentTemplateArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw', ], maxResults=123, ) print(response) Expected Output: { 'assessmentRunArns': [ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-v5D6fI3v', ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentRunArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_assessment_targets(filter=None, nextToken=None, maxResults=None): """ Lists the ARNs of the assessment targets within this AWS account. For more information about assessment targets, see Amazon Inspector Assessment Targets . See also: AWS API Documentation Exceptions Examples Lists the ARNs of the assessment targets within this AWS account. Expected Output: :example: response = client.list_assessment_targets( filter={ 'assessmentTargetNamePattern': 'string' }, nextToken='string', maxResults=123 ) :type filter: dict :param filter: You can use this parameter to specify a subset of data to be included in the action\'s response.\nFor a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.\n\nassessmentTargetNamePattern (string) --For a record to match a filter, an explicit value or a string that contains a wildcard that is specified for this data type property must match the value of the assessmentTargetName property of the AssessmentTarget data type.\n\n\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentTargets action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'assessmentTargetArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- assessmentTargetArns (list) -- A list of ARNs that specifies the assessment targets that are returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Examples Lists the ARNs of the assessment targets within this AWS account. 
response = client.list_assessment_targets( maxResults=123, ) print(response) Expected Output: { 'assessmentTargetArns': [ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTargetArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_assessment_templates(assessmentTargetArns=None, filter=None, nextToken=None, maxResults=None): """ Lists the assessment templates that correspond to the assessment targets that are specified by the ARNs of the assessment targets. See also: AWS API Documentation Exceptions Examples Lists the assessment templates that correspond to the assessment targets that are specified by the ARNs of the assessment targets. Expected Output: :example: response = client.list_assessment_templates( assessmentTargetArns=[ 'string', ], filter={ 'namePattern': 'string', 'durationRange': { 'minSeconds': 123, 'maxSeconds': 123 }, 'rulesPackageArns': [ 'string', ] }, nextToken='string', maxResults=123 ) :type assessmentTargetArns: list :param assessmentTargetArns: A list of ARNs that specifies the assessment targets whose assessment templates you want to list.\n\n(string) --\n\n :type filter: dict :param filter: You can use this parameter to specify a subset of data to be included in the action\'s response.\nFor a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.\n\nnamePattern (string) --For a record to match a filter, an explicit value or a string that contains a wildcard that is specified for this data type property must match the value of the assessmentTemplateName property of the AssessmentTemplate data type.\n\ndurationRange (dict) --For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the AssessmentTemplate data type.\n\nminSeconds (integer) --The minimum value of the duration range. Must be greater than zero.\n\nmaxSeconds (integer) --The maximum value of the duration range. Must be less than or equal to 604800 seconds (1 week).\n\n\n\nrulesPackageArns (list) --For a record to match a filter, the values that are specified for this data type property must be contained in the list of values of the rulesPackageArns property of the AssessmentTemplate data type.\n\n(string) --\n\n\n\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentTemplates action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'assessmentTemplateArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- assessmentTemplateArns (list) -- A list of ARNs that specifies the assessment templates returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. 
If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists the assessment templates that correspond to the assessment targets that are specified by the ARNs of the assessment targets. response = client.list_assessment_templates( assessmentTargetArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', ], maxResults=123, ) print(response) Expected Output: { 'assessmentTemplateArns': [ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw', 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-Uza6ihLh', ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'assessmentTemplateArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_event_subscriptions(resourceArn=None, nextToken=None, maxResults=None): """ Lists all the event subscriptions for the assessment template that is specified by the ARN of the assessment template. For more information, see SubscribeToEvent and UnsubscribeFromEvent . See also: AWS API Documentation Exceptions Examples Lists all the event subscriptions for the assessment template that is specified by the ARN of the assessment template. Expected Output: :example: response = client.list_event_subscriptions( resourceArn='string', nextToken='string', maxResults=123 ) :type resourceArn: string :param resourceArn: The ARN of the assessment template for which you want to list the existing event subscriptions. :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListEventSubscriptions action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'subscriptions': [ { 'resourceArn': 'string', 'topicArn': 'string', 'eventSubscriptions': [ { 'event': 'ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER', 'subscribedAt': datetime(2015, 1, 1) }, ] }, ], 'nextToken': 'string' } Response Structure (dict) -- subscriptions (list) -- Details of the returned event subscriptions. (dict) -- This data type is used as a response element in the ListEventSubscriptions action. resourceArn (string) -- The ARN of the assessment template that is used during the event for which the SNS notification is sent. topicArn (string) -- The ARN of the Amazon Simple Notification Service (SNS) topic to which the SNS notifications are sent. eventSubscriptions (list) -- The list of existing event subscriptions. (dict) -- This data type is used in the Subscription data type. event (string) -- The event for which Amazon Simple Notification Service (SNS) notifications are sent. subscribedAt (datetime) -- The time at which SubscribeToEvent is called. nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. 
If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists all the event subscriptions for the assessment template that is specified by the ARN of the assessment template. response = client.list_event_subscriptions( maxResults=123, resourceArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0', ) print(response) Expected Output: { 'nextToken': '1', 'subscriptions': [ { 'eventSubscriptions': [ { 'event': 'ASSESSMENT_RUN_COMPLETED', 'subscribedAt': datetime(2016, 3, 31, 20, 17, 20, 3, 91, 0), }, ], 'resourceArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0', 'topicArn': 'arn:aws:sns:us-west-2:123456789012:exampletopic', }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'subscriptions': [ { 'resourceArn': 'string', 'topicArn': 'string', 'eventSubscriptions': [ { 'event': 'ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER', 'subscribedAt': datetime(2015, 1, 1) }, ] }, ], 'nextToken': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException """ pass def list_exclusions(assessmentRunArn=None, nextToken=None, maxResults=None): """ Lists exclusions that are generated by the assessment run. See also: AWS API Documentation Exceptions :example: response = client.list_exclusions( assessmentRunArn='string', nextToken='string', maxResults=123 ) :type assessmentRunArn: string :param assessmentRunArn: [REQUIRED]\nThe ARN of the assessment run that generated the exclusions that you want to list.\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListExclusionsRequest action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 100. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'exclusionArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- exclusionArns (list) -- A list of exclusions\' ARNs returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException :return: { 'exclusionArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_findings(assessmentRunArns=None, filter=None, nextToken=None, maxResults=None): """ Lists findings that are generated by the assessment runs that are specified by the ARNs of the assessment runs.
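Rather than managing nextToken by hand, the pagination loop can usually be delegated to a paginator (see get_paginator above); availability can be checked with client.can_paginate('list_findings'). A sketch with an illustrative run ARN and a severity filter:

if client.can_paginate('list_findings'):
    paginator = client.get_paginator('list_findings')
    pages = paginator.paginate(
        assessmentRunArns=[
            'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE',  # illustrative
        ],
        filter={'severities': ['High', 'Medium']},  # skip Low/Informational/Undefined findings
    )
    for page in pages:
        for finding_arn in page['findingArns']:
            print(finding_arn)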
See also: AWS API Documentation Exceptions Examples Lists findings that are generated by the assessment runs that are specified by the ARNs of the assessment runs. Expected Output: :example: response = client.list_findings( assessmentRunArns=[ 'string', ], filter={ 'agentIds': [ 'string', ], 'autoScalingGroups': [ 'string', ], 'ruleNames': [ 'string', ], 'severities': [ 'Low'|'Medium'|'High'|'Informational'|'Undefined', ], 'rulesPackageArns': [ 'string', ], 'attributes': [ { 'key': 'string', 'value': 'string' }, ], 'userAttributes': [ { 'key': 'string', 'value': 'string' }, ], 'creationTimeRange': { 'beginDate': datetime(2015, 1, 1), 'endDate': datetime(2015, 1, 1) } }, nextToken='string', maxResults=123 ) :type assessmentRunArns: list :param assessmentRunArns: The ARNs of the assessment runs that generate the findings that you want to list.\n\n(string) --\n\n :type filter: dict :param filter: You can use this parameter to specify a subset of data to be included in the action\'s response.\nFor a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.\n\nagentIds (list) --For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the agentId property of the Finding data type.\n\n(string) --\n\n\nautoScalingGroups (list) --For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the autoScalingGroup property of the Finding data type.\n\n(string) --\n\n\nruleNames (list) --For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the ruleName property of the Finding data type.\n\n(string) --\n\n\nseverities (list) --For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the severity property of the Finding data type.\n\n(string) --\n\n\nrulesPackageArns (list) --For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the rulesPackageArn property of the Finding data type.\n\n(string) --\n\n\nattributes (list) --For a record to match a filter, the list of values that are specified for this data type property must be contained in the list of values of the attributes property of the Finding data type.\n\n(dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions.\n\nkey (string) -- [REQUIRED]The attribute key.\n\nvalue (string) --The value assigned to the attribute key.\n\n\n\n\n\nuserAttributes (list) --For a record to match a filter, the value that is specified for this data type property must be contained in the list of values of the userAttributes property of the Finding data type.\n\n(dict) --This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions.\n\nkey (string) -- [REQUIRED]The attribute key.\n\nvalue (string) --The value assigned to the attribute key.\n\n\n\n\n\ncreationTimeRange (dict) --The time range during which the finding is generated.\n\nbeginDate (datetime) --The minimum value of the timestamp range.\n\nendDate (datetime) --The maximum value of the timestamp range.\n\n\n\n\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. 
Set the value of this parameter to null on your first call to the ListFindings action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'findingArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- findingArns (list) -- A list of ARNs that specifies the findings returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists findings that are generated by the assessment runs that are specified by the ARNs of the assessment runs. response = client.list_findings( assessmentRunArns=[ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE', ], maxResults=123, ) print(response) Expected Output: { 'findingArns': [ 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-MKkpXXPE/finding/0-HwPnsDm4', 'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw/run/0-v5D6fI3v/finding/0-tyvmqBLy', ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'findingArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_rules_packages(nextToken=None, maxResults=None): """ Lists all available Amazon Inspector rules packages. See also: AWS API Documentation Exceptions Examples Lists all available Amazon Inspector rules packages. Expected Output: :example: response = client.list_rules_packages( nextToken='string', maxResults=123 ) :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'rulesPackageArns': [ 'string', ], 'nextToken': 'string' } Response Structure (dict) -- rulesPackageArns (list) -- The list of ARNs that specifies the rules packages returned by the action. (string) -- nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Examples Lists all available Amazon Inspector rules packages. 
response = client.list_rules_packages( maxResults=123, ) print(response) Expected Output: { 'nextToken': '1', 'rulesPackageArns': [ 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-9hgA516p', 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-H5hpSawc', 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ', 'arn:aws:inspector:us-west-2:758058086616:rulespackage/0-vg5GGHSD', ], 'ResponseMetadata': { '...': '...', }, } :return: { 'rulesPackageArns': [ 'string', ], 'nextToken': 'string' } :returns: (string) -- """ pass def list_tags_for_resource(resourceArn=None): """ Lists all tags associated with an assessment template. See also: AWS API Documentation Exceptions Examples Lists all tags associated with an assessment template. Expected Output: :example: response = client.list_tags_for_resource( resourceArn='string' ) :type resourceArn: string :param resourceArn: [REQUIRED]\nThe ARN that specifies the assessment template whose tags you want to list.\n :rtype: dict ReturnsResponse Syntax{ 'tags': [ { 'key': 'string', 'value': 'string' }, ] } Response Structure (dict) -- tags (list) --A collection of key and value pairs. (dict) --A key and value pair. This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action. key (string) --A tag key. value (string) --A value assigned to a tag key. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Examples Lists all tags associated with an assessment template. response = client.list_tags_for_resource( resourceArn='arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-gcwFliYu', ) print(response) Expected Output: { 'tags': [ { 'key': 'Name', 'value': 'Example', }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'tags': [ { 'key': 'string', 'value': 'string' }, ] } """ pass def preview_agents(previewAgentsArn=None, nextToken=None, maxResults=None): """ Previews the agents installed on the EC2 instances that are part of the specified assessment target. See also: AWS API Documentation Exceptions Examples Previews the agents installed on the EC2 instances that are part of the specified assessment target. Expected Output: :example: response = client.preview_agents( previewAgentsArn='string', nextToken='string', maxResults=123 ) :type previewAgentsArn: string :param previewAgentsArn: [REQUIRED]\nThe ARN of the assessment target whose agents you want to preview.\n :type nextToken: string :param nextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the PreviewAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data. :type maxResults: integer :param maxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500. :rtype: dict ReturnsResponse Syntax { 'agentPreviews': [ { 'hostname': 'string', 'agentId': 'string', 'autoScalingGroup': 'string', 'agentHealth': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN', 'agentVersion': 'string', 'operatingSystem': 'string', 'kernelVersion': 'string', 'ipv4Address': 'string' }, ], 'nextToken': 'string' } Response Structure (dict) -- agentPreviews (list) -- The resulting list of agents. 
(dict) -- Used as a response element in the PreviewAgents action. hostname (string) -- The hostname of the EC2 instance on which the Amazon Inspector Agent is installed. agentId (string) -- The ID of the EC2 instance where the agent is installed. autoScalingGroup (string) -- The Auto Scaling group for the EC2 instance where the agent is installed. agentHealth (string) -- The health status of the Amazon Inspector Agent. agentVersion (string) -- The version of the Amazon Inspector Agent. operatingSystem (string) -- The operating system running on the EC2 instance on which the Amazon Inspector Agent is installed. kernelVersion (string) -- The kernel version of the operating system running on the EC2 instance on which the Amazon Inspector Agent is installed. ipv4Address (string) -- The IP address of the EC2 instance on which the Amazon Inspector Agent is installed. nextToken (string) -- When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null. Exceptions Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.InvalidCrossAccountRoleException Examples Previews the agents installed on the EC2 instances that are part of the specified assessment target. response = client.preview_agents( maxResults=123, previewAgentsArn='arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq', ) print(response) Expected Output: { 'agentPreviews': [ { 'agentId': 'i-49113b93', }, ], 'nextToken': '1', 'ResponseMetadata': { '...': '...', }, } :return: { 'agentPreviews': [ { 'hostname': 'string', 'agentId': 'string', 'autoScalingGroup': 'string', 'agentHealth': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN', 'agentVersion': 'string', 'operatingSystem': 'string', 'kernelVersion': 'string', 'ipv4Address': 'string' }, ], 'nextToken': 'string' } :returns: Inspector.Client.exceptions.InternalException Inspector.Client.exceptions.InvalidInputException Inspector.Client.exceptions.AccessDeniedException Inspector.Client.exceptions.NoSuchEntityException Inspector.Client.exceptions.InvalidCrossAccountRoleException """ pass def register_cross_account_access_role(roleArn=None): """ Registers the IAM role that grants Amazon Inspector access to AWS Services needed to perform security assessments. See also: AWS API Documentation Exceptions Examples Registers the IAM role that Amazon Inspector uses to list your EC2 instances at the start of the assessment run or when you call the PreviewAgents action. Expected Output: :example: response = client.register_cross_account_access_role( roleArn='string' ) :type roleArn: string :param roleArn: [REQUIRED]\nThe ARN of the IAM role that grants Amazon Inspector access to AWS Services needed to perform security assessments.\n :return: response = client.register_cross_account_access_role( roleArn='arn:aws:iam::123456789012:role/inspector', ) print(response) """ pass def remove_attributes_from_findings(findingArns=None, attributeKeys=None): """ Removes entire attributes (key and value pairs) from the findings that are specified by the ARNs of the findings where an attribute with the specified key exists. 
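Note that per-item problems are reported through the failedItems map rather than raised as exceptions, so a caller can retry just the ARNs flagged as retryable. A minimal sketch (the finding ARN and attribute key are the illustrative values from the example below; client is an Inspector client created with boto3.client('inspector')):

finding_arns = [
    'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-8l1VIE0D/run/0-Z02cjjug/finding/0-T8yM9mEU',  # illustrative
]
response = client.remove_attributes_from_findings(
    findingArns=finding_arns,
    attributeKeys=['Example'],  # a key previously set with add_attributes_to_findings
)
retryable = [
    arn for arn, failure in response['failedItems'].items()
    if failure['retryable']
]
if retryable:
    # Failure codes such as LIMIT_EXCEEDED or INTERNAL_ERROR may succeed on a later attempt.
    client.remove_attributes_from_findings(
        findingArns=retryable,
        attributeKeys=['Example'],
    )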
    See also: AWS API Documentation

    Exceptions
    Examples
    Removes entire attributes (key and value pairs) from the findings that are specified by the ARNs of the findings where an attribute with the specified key exists.
    Expected Output:

    :example: response = client.remove_attributes_from_findings(
        findingArns=[
            'string',
        ],
        attributeKeys=[
            'string',
        ]
    )

    :type findingArns: list
    :param findingArns: [REQUIRED]\nThe ARNs that specify the findings that you want to remove attributes from.\n\n(string) --\n\n

    :type attributeKeys: list
    :param attributeKeys: [REQUIRED]\nThe array of attribute keys that you want to remove from specified findings.\n\n(string) --\n\n

    :rtype: dict

    Returns
    Response Syntax
    {
        'failedItems': {
            'string': {
                'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR',
                'retryable': True|False
            }
        }
    }

    Response Structure
    (dict) --
        failedItems (dict) -- Attributes details that cannot be described. An error code is provided for each failed item.
            (string) --
                (dict) -- Includes details about the failed items.
                    failureCode (string) -- The status code of a failed item.
                    retryable (boolean) -- Indicates whether you can immediately retry a request for this item for a specified resource.

    Exceptions
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException

    Examples
    Removes entire attributes (key and value pairs) from the findings that are specified by the ARNs of the findings where an attribute with the specified key exists.
    response = client.remove_attributes_from_findings(
        attributeKeys=[
            'key=Example,value=example',
        ],
        findingArns=[
            'arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-8l1VIE0D/run/0-Z02cjjug/finding/0-T8yM9mEU',
        ],
    )
    print(response)

    Expected Output:
    {
        'failedItems': {},
        'ResponseMetadata': {
            '...': '...',
        },
    }

    :return: {
        'failedItems': {
            'string': {
                'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR',
                'retryable': True|False
            }
        }
    }

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def set_tags_for_resource(resourceArn=None, tags=None):
    """
    Sets tags (key and value pairs) to the assessment template that is specified by the ARN of the assessment template.
    See also: AWS API Documentation

    Exceptions
    Examples
    Sets tags (key and value pairs) to the assessment template that is specified by the ARN of the assessment template.
    Expected Output:

    :example: response = client.set_tags_for_resource(
        resourceArn='string',
        tags=[
            {
                'key': 'string',
                'value': 'string'
            },
        ]
    )

    :type resourceArn: string
    :param resourceArn: [REQUIRED]\nThe ARN of the assessment template that you want to set tags to.\n

    :type tags: list
    :param tags: A collection of key and value pairs that you want to set to the assessment template.\n\n(dict) --A key and value pair. This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action.\n\nkey (string) -- [REQUIRED]A tag key.\n\nvalue (string) --A value assigned to a tag key.\n\n\n\n\n

    :return: response = client.set_tags_for_resource(
        resourceArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0',
        tags=[
            {
                'key': 'Example',
                'value': 'example',
            },
        ],
    )
    print(response)

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def start_assessment_run(assessmentTemplateArn=None, assessmentRunName=None):
    """
    Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.
    See also: AWS API Documentation

    Exceptions
    Examples
    Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.
    Expected Output:

    :example: response = client.start_assessment_run(
        assessmentTemplateArn='string',
        assessmentRunName='string'
    )

    :type assessmentTemplateArn: string
    :param assessmentTemplateArn: [REQUIRED]\nThe ARN of the assessment template of the assessment run that you want to start.\n

    :type assessmentRunName: string
    :param assessmentRunName: You can specify the name for the assessment run. The name must be unique for the assessment template whose ARN is used to start the assessment run.

    :rtype: dict

    Returns
    Response Syntax
    {
        'assessmentRunArn': 'string'
    }

    Response Structure
    (dict) --
        assessmentRunArn (string) -- The ARN of the assessment run that has been started.

    Exceptions
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.LimitExceededException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.InvalidCrossAccountRoleException
    Inspector.Client.exceptions.AgentsAlreadyRunningAssessmentException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException

    Examples
    Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.
    response = client.start_assessment_run(
        assessmentRunName='examplerun',
        assessmentTemplateArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T',
    )
    print(response)

    Expected Output:
    {
        'assessmentRunArn': 'arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T/run/0-jOoroxyY',
        'ResponseMetadata': {
            '...': '...',
        },
    }

    :return: {
        'assessmentRunArn': 'string'
    }

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.LimitExceededException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.InvalidCrossAccountRoleException
    Inspector.Client.exceptions.AgentsAlreadyRunningAssessmentException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def stop_assessment_run(assessmentRunArn=None, stopAction=None):
    """
    Stops the assessment run that is specified by the ARN of the assessment run.
    See also: AWS API Documentation

    Exceptions
    Examples
    Stops the assessment run that is specified by the ARN of the assessment run.
    Expected Output:

    :example: response = client.stop_assessment_run(
        assessmentRunArn='string',
        stopAction='START_EVALUATION'|'SKIP_EVALUATION'
    )

    :type assessmentRunArn: string
    :param assessmentRunArn: [REQUIRED]\nThe ARN of the assessment run that you want to stop.\n

    :type stopAction: string
    :param stopAction: An input option that can be set to either START_EVALUATION or SKIP_EVALUATION. START_EVALUATION (the default value) stops the AWS agent from collecting data and begins the results evaluation and the findings generation process. SKIP_EVALUATION cancels the assessment run immediately, after which no findings are generated.

    :return: response = client.stop_assessment_run(
        assessmentRunArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T/run/0-11LMTAVe',
    )
    print(response)

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def subscribe_to_event(resourceArn=None, event=None, topicArn=None):
    """
    Enables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.
    See also: AWS API Documentation

    Exceptions
    Examples
    Enables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.
    Expected Output:

    :example: response = client.subscribe_to_event(
        resourceArn='string',
        event='ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER',
        topicArn='string'
    )

    :type resourceArn: string
    :param resourceArn: [REQUIRED]\nThe ARN of the assessment template that is used during the event for which you want to receive SNS notifications.\n

    :type event: string
    :param event: [REQUIRED]\nThe event for which you want to receive SNS notifications.\n

    :type topicArn: string
    :param topicArn: [REQUIRED]\nThe ARN of the SNS topic to which the SNS notifications are sent.\n

    :return: response = client.subscribe_to_event(
        event='ASSESSMENT_RUN_COMPLETED',
        resourceArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0',
        topicArn='arn:aws:sns:us-west-2:123456789012:exampletopic',
    )
    print(response)

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.LimitExceededException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def unsubscribe_from_event(resourceArn=None, event=None, topicArn=None):
    """
    Disables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.
    See also: AWS API Documentation

    Exceptions
    Examples
    Disables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.
    Expected Output:

    :example: response = client.unsubscribe_from_event(
        resourceArn='string',
        event='ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER',
        topicArn='string'
    )

    :type resourceArn: string
    :param resourceArn: [REQUIRED]\nThe ARN of the assessment template that is used during the event for which you want to stop receiving SNS notifications.\n

    :type event: string
    :param event: [REQUIRED]\nThe event for which you want to stop receiving SNS notifications.\n

    :type topicArn: string
    :param topicArn: [REQUIRED]\nThe ARN of the SNS topic to which SNS notifications are sent.\n

    :return: response = client.unsubscribe_from_event(
        event='ASSESSMENT_RUN_COMPLETED',
        resourceArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-7sbz2Kz0',
        topicArn='arn:aws:sns:us-west-2:123456789012:exampletopic',
    )
    print(response)

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass

def update_assessment_target(assessmentTargetArn=None, assessmentTargetName=None, resourceGroupArn=None):
    """
    Updates the assessment target that is specified by the ARN of the assessment target. If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target.
    See also: AWS API Documentation

    Exceptions
    Examples
    Updates the assessment target that is specified by the ARN of the assessment target.
    Expected Output:

    :example: response = client.update_assessment_target(
        assessmentTargetArn='string',
        assessmentTargetName='string',
        resourceGroupArn='string'
    )

    :type assessmentTargetArn: string
    :param assessmentTargetArn: [REQUIRED]\nThe ARN of the assessment target that you want to update.\n

    :type assessmentTargetName: string
    :param assessmentTargetName: [REQUIRED]\nThe name of the assessment target that you want to update.\n

    :type resourceGroupArn: string
    :param resourceGroupArn: The ARN of the resource group that is used to specify the new resource group to associate with the assessment target.

    :return: response = client.update_assessment_target(
        assessmentTargetArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX',
        assessmentTargetName='Example',
        resourceGroupArn='arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-yNbgL5Pt',
    )
    print(response)

    :returns:
    Inspector.Client.exceptions.InternalException
    Inspector.Client.exceptions.InvalidInputException
    Inspector.Client.exceptions.AccessDeniedException
    Inspector.Client.exceptions.NoSuchEntityException
    Inspector.Client.exceptions.ServiceTemporarilyUnavailableException
    """
    pass
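The stubs above only document the Inspector API; the same calls run unchanged against the real boto3 client. A minimal sketch, assuming us-west-2 credentials and reusing the placeholder ARNs from the docstring examples (they must be swapped for real resources):

import boto3

# Minimal sketch, not part of the stub file above: the same calls run against
# the real Inspector client.  Region and ARNs are assumptions; the ARNs reuse
# the placeholder values from the docstring examples.
client = boto3.client('inspector', region_name='us-west-2')

run = client.start_assessment_run(
    assessmentTemplateArn='arn:aws:inspector:us-west-2:123456789012:target/0-nvgVhaxX/template/0-it5r2S4T',
    assessmentRunName='examplerun',
)
print(run['assessmentRunArn'])

# Stop the run early; SKIP_EVALUATION cancels immediately and generates no findings.
client.stop_assessment_run(
    assessmentRunArn=run['assessmentRunArn'],
    stopAction='SKIP_EVALUATION',
)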
30.715707
2,598
0.641974
15,315
145,101
6.043748
0.059092
0.012911
0.054019
0.010836
0.830337
0.793129
0.758124
0.721986
0.702625
0.674838
0
0.022075
0.269771
145,101
4,723
2,599
30.72221
0.851487
0.971475
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
10
f18af5f19d5e99598f7e907eca3b371447134081
121
py
Python
torchcv/__init__.py
CVHj/torchcv
6291f3e1e4bbf6467fd6b1e79001d34a59481bb6
[ "MIT" ]
433
2017-11-30T15:46:58.000Z
2022-01-16T08:06:11.000Z
torchcv/__init__.py
CVHj/torchcv
6291f3e1e4bbf6467fd6b1e79001d34a59481bb6
[ "MIT" ]
51
2018-01-29T15:14:33.000Z
2021-08-23T12:02:18.000Z
fpn-hoi/torchcv/__init__.py
TheFairBear/Box-Attention-SSD-HOI
6101e209a709899c5645342784c8f451028ff46e
[ "MIT" ]
92
2018-01-20T07:45:36.000Z
2021-05-28T10:43:53.000Z
from torchcv import utils
from torchcv import datasets
from torchcv import transforms
from torchcv import visualizations
24.2
34
0.867769
16
121
6.5625
0.4375
0.419048
0.647619
0
0
0
0
0
0
0
0
0
0.132231
121
4
35
30.25
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
74e44d5cf3b97de9b13fae29819af8797196248f
17,715
py
Python
stix2generator/test/test_object_generator_object.py
majacQ/cti-stix-generator
7465ecd29ef6caabf9f1b60ad45dad789c475028
[ "BSD-3-Clause" ]
20
2020-12-10T18:16:28.000Z
2022-02-20T19:30:53.000Z
stix2generator/test/test_object_generator_object.py
majacQ/cti-stix-generator
7465ecd29ef6caabf9f1b60ad45dad789c475028
[ "BSD-3-Clause" ]
26
2021-01-13T23:32:19.000Z
2022-03-29T06:47:02.000Z
stix2generator/test/test_object_generator_object.py
majacQ/cti-stix-generator
7465ecd29ef6caabf9f1b60ad45dad789c475028
[ "BSD-3-Clause" ]
8
2020-12-14T23:10:16.000Z
2021-12-06T13:07:24.000Z
import pytest

import stix2generator.exceptions


def test_object(object_generator):
    value = object_generator.generate_from_spec({
        "type": "object",
        "properties": {"a": 1, "b": 2}
    })

    assert isinstance(value, dict)
    assert "a" in value
    assert "b" in value
    assert value == {"a": 1, "b": 2}


def test_object_optional_props(object_generator, num_trials):
    for _ in range(num_trials):
        # test with "optional"
        value = object_generator.generate_from_spec({
            "type": "object",
            "optional": ["b"],
            "properties": {"a": 1, "b": 2}
        })
        assert "a" in value
        assert all(prop_name in ("a", "b") for prop_name in value)

        # test with "required"
        value = object_generator.generate_from_spec({
            "type": "object",
            "required": ["a"],
            "properties": {"a": 1, "b": 2}
        })
        assert "a" in value
        assert all(prop_name in ("a", "b") for prop_name in value)


def test_object_presence_coconstraint_one_required(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"]
            },
            "required": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # exactly one of "a" and "b" must be there
        assert "a" in value or "b" in value
        assert not ("a" in value and "b" in value)


def test_object_presence_coconstraint_one_optional(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"]
            },
            "optional": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # only "a", only "b", or no properties at all should be there
        assert ("a" in value and "b" not in value) or \
            ("a" not in value and "b" in value) or \
            not value


def test_object_presence_coconstraint_at_least_one_required(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "at-least-one": ["group-a"]
            },
            "required": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # one or both of "a" and "b" must be there
        assert "a" in value or "b" in value


def test_object_presence_coconstraint_at_least_one_optional(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "at-least-one": ["group-a"]
            },
            "optional": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # silly case: both, either, or neither of "a" and "b" can be present,
        # same as making them all optional.  Shouldn't write a spec like this,
        # it's overly complicated.
        assert all(prop_name in ("a", "b") for prop_name in value)


def test_object_presence_coconstraint_all_required(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "all": ["group-a"]
            },
            "required": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # silly case: all properties in group-a are required, same as if they
        # were all individually declared to be required.  Shouldn't write a
        # spec like this, it's overly complicated.
        assert "a" in value and "b" in value


def test_object_presence_coconstraint_all_optional(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "all": ["group-a"]
            },
            "optional": ["group-a"],
            "properties": {"a": 1, "b": 2}
        })
        # both or neither of "a" and "b" must be there.
        assert ("a" in value and "b" in value) or not value


def test_object_presence_coconstraint_dependencies_props(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "dependencies": {"a": ["b", "c"]}
            },
            "optional": ["a", "b", "c"],
            "properties": {"a": 1, "b": 2, "c": 3}
        })
        # If "a" is present, "b" and "c" must be present.  If "a" is not
        # present, "b" and "c" may or may not be present (since they're
        # declared optional).
        if "a" in value:
            assert "b" in value and "c" in value
        else:
            assert all(prop_name in ("b", "c") for prop_name in value)


def test_object_presence_coconstraint_dependencies_group_key(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"],
                "dependencies": {"group-a": ["c"]}
            },
            "optional": ["group-a", "c"],
            "properties": {"a": 1, "b": 2, "c": 3}
        })
        # If "a" or "b" are present, then "c" must also be present.  If neither
        # "a" nor "b" are present, "c" may or may not be present (since it's
        # declared optional).
        if "a" in value or "b" in value:
            assert "c" in value
        else:
            assert "c" in value or not value


def test_object_presence_coconstraint_dependencies_group_value(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"],
                "dependencies": {"c": ["group-a"]}
            },
            "optional": ["group-a", "c"],
            "properties": {"a": 1, "b": 2, "c": 3}
        })
        # If "c" is present, then exactly one of "a" or "b" must be present.
        # If "c" is not present, then exactly one or neither of "a" and "b"
        # must be present (since group-a is declared optional).
        if "c" in value:
            assert "a" in value or "b" in value
        else:
            assert "a" in value or "b" in value or not value


def test_object_presence_coconstraint_dependencies_group_key_and_value(
    object_generator, num_trials
):
    for _ in range(num_trials):
        value = object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {
                    "group-a": ["a", "b"],
                    "group-b": ["c", "d"]
                },
                "one": ["group-a"],
                "all": ["group-b"],
                "dependencies": {"group-a": ["group-b"]}
            },
            "optional": ["group-a", "group-b"],
            "properties": {"a": 1, "b": 2, "c": 3, "d": 4}
        })
        # If exactly one of "a" or "b" is present, then both of "c" and "d"
        # must be present.  If neither "a" nor "b" is present, then neither or
        # both of "c" and "d" may be present.
        if "a" in value or "b" in value:
            assert "c" in value and "d" in value
        else:
            assert ("c" in value and "d" in value) or not value


def test_object_presence_coconstraint_errors1(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {
                    # refer to undefined property in group
                    "group-a": ["a", "c"]
                },
                "one": ["group-a"]
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors2(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                # refer to undefined group in co-constraint
                "one": ["group-b"]
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors3(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "dependencies": {
                    # refer to undefined property as value
                    "a": ["b", "c"]
                }
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors4(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "dependencies": {
                    # refer to undefined property as key
                    "c": ["a", "b"]
                }
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors5(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"],
                "dependencies": {
                    # refer to undefined group as value
                    "c": ["group-b"]
                }
            },
            "properties": {"a": 1, "b": 2, "c": 3}
        })


def test_object_presence_coconstraint_errors6(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"],
                "dependencies": {
                    # refer to undefined group as key
                    "group-b": ["c"]
                }
            },
            "properties": {"a": 1, "b": 2, "c": 3}
        })


def test_object_presence_coconstraint_errors7(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]}
                # group-a is missing its constraint
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors8(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {
                    "group-a": ["a", "b"],
                    # groups can't overlap
                    "group-b": ["b", "c"]
                },
                "one": ["group-a", "group-b"]
            },
            "properties": {"a": 1, "b": 2, "c": 3}
        })


def test_object_presence_coconstraint_errors9(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"],
                "dependencies": {
                    # can't directly refer to grouped properties
                    "a": ["group-a"]
                }
            },
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors10(object_generator):
    with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {"group-a": ["a", "b"]},
                "one": ["group-a"]
            },
            # can't directly refer to grouped properties
            "required": ["a"],
            "properties": {"a": 1, "b": 2}
        })


def test_object_presence_coconstraint_errors11(object_generator):
    with pytest.raises(stix2generator.exceptions.PresenceCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            "presence-coconstraints": {
                "property-groups": {
                    # can't name a group the same as a property
                    "a": ["b", "c"]
                },
                "one": ["a"]
            },
            "properties": {"a": 1, "b": 2, "c": 3}
        })


def test_object_value_coconstraint(object_generator):
    object_generator.generate_from_spec({
        "type": "object",
        # Value co-constraints currently only used in certain semantics;
        # they will be ignored in this spec.  Just check for errors.
        "value-coconstraints": [
            "a < b", "a <= b", "a > b", "a >= b", "a = b", "a != b"
        ],
        "properties": {"a": 1, "b": 2}
    })


def test_object_value_coconstraint_bad_operator(object_generator):
    with pytest.raises(stix2generator.exceptions.ValueCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            # invalid operator
            "value-coconstraints": ["a $ b"],
            "properties": {"a": 1, "b": 2}
        })


def test_object_value_coconstraint_bad_property(object_generator):
    with pytest.raises(stix2generator.exceptions.ValueCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            # undefined property name
            "value-coconstraints": ["a = x"],
            "properties": {"a": 1, "b": 2}
        })


def test_object_value_coconstraint_self_relation(object_generator):
    with pytest.raises(stix2generator.exceptions.ValueCoconstraintError):
        object_generator.generate_from_spec({
            "type": "object",
            # can't relate a prop to itself
            "value-coconstraints": ["a = a"],
            "properties": {"a": 1, "b": 2}
        })
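The presence co-constraint semantics exercised above can be read off a single spec. A minimal sketch of the spec shape, assuming an object_generator instance like the pytest fixture these tests receive (the fixture is defined in a conftest that is not part of this file, so the call is shown commented out):

# Sketch only: combines a "one" group with a dependency on a third property.
spec = {
    "type": "object",
    "presence-coconstraints": {
        "property-groups": {"group-a": ["a", "b"]},
        "one": ["group-a"],
        "dependencies": {"group-a": ["c"]},
    },
    "optional": ["group-a", "c"],
    "properties": {"a": 1, "b": 2, "c": 3},
}
# value = object_generator.generate_from_spec(spec)
# Expected: at most one of "a"/"b" appears, and whenever one does, "c" does too.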
30.025424
79
0.462659
1,654
17,715
4.787787
0.08283
0.10418
0.010986
0.014648
0.841268
0.830913
0.815886
0.776487
0.734689
0.723197
0
0.009152
0.414056
17,715
589
80
30.076401
0.753757
0.102681
0
0.678497
1
0
0.1421
0.029139
0
0
0
0
0.048017
1
0.056367
false
0
0.004175
0
0.060543
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
74ecb60517d69b00132d5b836206827fe4b86da3
114
py
Python
nodenet/python/nodenet/io/__init__.py
NOOXY-research/NodeNet
8bf7e0c2fd0e4fae4a51b2900014004728f3c935
[ "Apache-2.0" ]
2
2018-01-31T05:52:23.000Z
2020-08-07T19:14:18.000Z
nodenet/python/nodenet/io/__init__.py
NOOXY-research/NodeNet
8bf7e0c2fd0e4fae4a51b2900014004728f3c935
[ "Apache-2.0" ]
1
2017-11-22T09:39:50.000Z
2017-11-22T09:39:50.000Z
nodenet/python/nodenet/io/__init__.py
magneticchen/NodeNet
8bf7e0c2fd0e4fae4a51b2900014004728f3c935
[ "Apache-2.0" ]
null
null
null
# Create alias
from nodenet.io.commons import *
from nodenet.io.neuralnet import *
from nodenet.io.data import *
19
34
0.77193
17
114
5.176471
0.529412
0.375
0.443182
0.431818
0
0
0
0
0
0
0
0
0.140351
114
5
35
22.8
0.897959
0.105263
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
7434c0d89d3d4e87b0dda3fb035edd974df54882
3,292
py
Python
test/pyaz/cdn/custom_domain/__init__.py
bigdatamoore/py-az-cli
54383a4ee7cc77556f6183e74e992eec95b28e01
[ "MIT" ]
null
null
null
test/pyaz/cdn/custom_domain/__init__.py
bigdatamoore/py-az-cli
54383a4ee7cc77556f6183e74e992eec95b28e01
[ "MIT" ]
9
2021-09-24T16:37:24.000Z
2021-12-24T00:39:19.000Z
test/pyaz/cdn/custom_domain/__init__.py
bigdatamoore/py-az-cli
54383a4ee7cc77556f6183e74e992eec95b28e01
[ "MIT" ]
null
null
null
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params


def show(resource_group, profile_name, endpoint_name, name):
    params = get_params(locals())
    command = "az cdn custom-domain show " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    # Note: in the original, print(stdout)/print(stderr) sat *after* the
    # return/raise and could never run; they are moved ahead of the exit here.
    # The same fix is applied in every wrapper below.
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)


def delete(resource_group, profile_name, endpoint_name, name):
    params = get_params(locals())
    command = "az cdn custom-domain delete " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)


def list(resource_group, profile_name, endpoint_name):
    params = get_params(locals())
    command = "az cdn custom-domain list " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)


def create(resource_group, profile_name, endpoint_name, name, hostname, location=None, tags=None):
    params = get_params(locals())
    command = "az cdn custom-domain create " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)


def enable_https(resource_group, profile_name, endpoint_name, name, user_cert_subscription_id=None, user_cert_group_name=None, user_cert_vault_name=None, user_cert_secret_name=None, user_cert_secret_version=None, user_cert_protocol_type=None, min_tls_version=None):
    params = get_params(locals())
    command = "az cdn custom-domain enable-https " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)


def disable_https(resource_group, profile_name, endpoint_name, name):
    params = get_params(locals())
    command = "az cdn custom-domain disable-https " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
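Each wrapper above repeats the same run/decode/parse sequence. A hedged refactor sketch (not part of py-az-cli; _run_az is a hypothetical helper) showing how the duplication could be factored out:

import json
import subprocess

def _run_az(command):
    # Run one az CLI invocation and return parsed JSON, raising on empty stdout.
    output = subprocess.run(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if not stdout:
        raise Exception(stderr)
    return json.loads(stdout)

# e.g. show() would reduce to:
#     return _run_az("az cdn custom-domain show " + params)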
37.409091
265
0.68226
414
3,292
5.304348
0.144928
0.076503
0.054645
0.065574
0.882058
0.862022
0.84745
0.829235
0.806466
0.806466
0
0.004592
0.206258
3,292
87
266
37.83908
0.835821
0
0
0.825
0
0
0.071993
0
0
0
0
0
0
1
0.075
false
0
0.025
0
0.175
0.225
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
746350c85cfdd1ca0b93081ee46de9756e432473
20
py
Python
src/Second Chapter/Exercise2.py
matthijskrul/ThinkPython
34c1d81f4ef535c32b8b0309b23c7ca37f851606
[ "MIT" ]
null
null
null
src/Second Chapter/Exercise2.py
matthijskrul/ThinkPython
34c1d81f4ef535c32b8b0309b23c7ca37f851606
[ "MIT" ]
null
null
null
src/Second Chapter/Exercise2.py
matthijskrul/ThinkPython
34c1d81f4ef535c32b8b0309b23c7ca37f851606
[ "MIT" ]
null
null
null
6 * (1-2)
#6 * (1-2)
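For reference, the exercise expression evaluates to -6, since parentheses bind before multiplication:

>>> 6 * (1 - 2)   # (1 - 2) is evaluated first, giving 6 * -1
-6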
10
10
0.3
6
20
1
0.5
0.666667
1
0
0
0
0
0
0
0
0
0.4
0.25
20
2
10
10
0
0.45
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
9
746aa01bcaf30ec22c5ccc1f86c10e78426b6911
155
py
Python
tests/test_utils.py
Justin-Xiang/wdapy
7a5ac41df1c4226483b4291c02c38f693018db73
[ "MIT" ]
7
2021-08-30T02:54:25.000Z
2022-03-14T06:24:58.000Z
tests/test_utils.py
Justin-Xiang/wdapy
7a5ac41df1c4226483b4291c02c38f693018db73
[ "MIT" ]
5
2021-09-09T09:59:32.000Z
2022-01-14T06:39:23.000Z
tests/test_utils.py
Justin-Xiang/wdapy
7a5ac41df1c4226483b4291c02c38f693018db73
[ "MIT" ]
2
2021-09-07T08:23:58.000Z
2021-12-30T11:24:59.000Z
# coding: utf-8
#

from wdapy._utils import camel_to_snake


def test_camel_to_snake():
    assert "this_is_my_string" == camel_to_snake("ThisIsMyString")
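A hypothetical implementation sketch of the behavior this test pins down; wdapy's actual camel_to_snake in wdapy/_utils.py may differ:

import re

def camel_to_snake(name: str) -> str:
    # Insert an underscore before each capital that follows a lowercase letter
    # or digit, then lowercase: "ThisIsMyString" -> "this_is_my_string".
    return re.sub(r"(?<=[a-z0-9])(?=[A-Z])", "_", name).lower()

assert camel_to_snake("ThisIsMyString") == "this_is_my_string"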
17.222222
66
0.76129
24
155
4.458333
0.75
0.196262
0.336449
0
0
0
0
0
0
0
0
0.007463
0.135484
155
8
67
19.375
0.791045
0.083871
0
0
0
0
0.223022
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
74a5b27ee0f22488b726781f6b3028694d59b8f7
6,879
py
Python
geonode/geonode/tests/test_rest_api.py
ttungbmt/BecaGIS_GeoPortal
6c05f9fc020ec4ccf600ba2503a06c2231443920
[ "MIT" ]
null
null
null
geonode/geonode/tests/test_rest_api.py
ttungbmt/BecaGIS_GeoPortal
6c05f9fc020ec4ccf600ba2503a06c2231443920
[ "MIT" ]
null
null
null
geonode/geonode/tests/test_rest_api.py
ttungbmt/BecaGIS_GeoPortal
6c05f9fc020ec4ccf600ba2503a06c2231443920
[ "MIT" ]
null
null
null
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

from unittest.mock import patch, MagicMock

from django.contrib.auth.models import AnonymousUser, Group

from geonode.api.authorization import GroupAuthorization, GroupProfileAuthorization
from geonode.groups.models import GroupProfile
from geonode.people.models import Profile
from geonode.tests.base import GeoNodeBaseTestSupport


class TestGroupResAuthorization(GeoNodeBaseTestSupport):
    # Group fixture is loaded in base class

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=Group.objects.exclude(name='anonymous'))
    def test_super_admin_user(self, super_mock):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test', is_staff=True, is_superuser=True)}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupAuthorization().read_list([], mock_bundle)
        self.assertEqual(Group.objects.exclude(name='anonymous').count(),
                         groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=Group.objects.exclude(name='anonymous'))
    @patch('geonode.people.models.Profile.group_list_all', return_value=[2])
    def test_regular_user_hide_private(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test')}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(1, groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=Group.objects.exclude(name='anonymous'))
    @patch('geonode.people.models.Profile.group_list_all', return_value=[1])
    def test_regular_user(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test')}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(2, groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=Group.objects.exclude(name='anonymous'))
    @patch('geonode.people.models.Profile.group_list_all', return_value=[1])
    def test_anonymous_user(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': AnonymousUser()}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(1, groups.count())


class TestGroupProfileResAuthorization(GeoNodeBaseTestSupport):
    # Group fixture is loaded in base class

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=GroupProfile.objects.all())
    def test_super_admin_user(self, super_mock):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test', is_staff=True, is_superuser=True)}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupProfileAuthorization().read_list([], mock_bundle)
        self.assertEqual(GroupProfile.objects.all().count(), groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=GroupProfile.objects.all())
    @patch('geonode.people.models.Profile.group_list_all', return_value=[2])
    def test_regular_user_hide_private(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test')}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupProfileAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(1, groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=GroupProfile.objects.all())
    @patch('geonode.people.models.Profile.group_list_all', return_value=[1])
    def test_regular_user(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': Profile(username='test')}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupProfileAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(2, groups.count())

    @patch('geonode.api.authorization.ApiLockdownAuthorization.read_list',
           return_value=GroupProfile.objects.all())
    @patch('geonode.people.models.Profile.group_list_all', return_value=[1])
    def test_anonymous_user(self, super_mock, mocked_profile):
        mock_bundle = MagicMock()
        request_mock = MagicMock()
        r_attr = {'user': AnonymousUser()}
        attrs = {'request': request_mock}
        request_mock.configure_mock(**r_attr)
        mock_bundle.configure_mock(**attrs)
        groups = GroupProfileAuthorization().read_list(['not_empty_but_fake'], mock_bundle)
        self.assertEqual(1, groups.count())
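The mock wiring repeated in every test above can be shown in isolation. A minimal sketch, independent of geonode: configure_mock attaches attributes after creation, so bundle.request.user resolves to whatever user object the test injected:

from unittest.mock import MagicMock

# configure_mock(**attrs) sets attributes on an existing mock, so nested
# attribute access resolves to the objects the test chose.
request_mock = MagicMock()
request_mock.configure_mock(**{'user': 'chosen-user-object'})
bundle = MagicMock()
bundle.configure_mock(**{'request': request_mock})
assert bundle.request.user == 'chosen-user-object'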
39.763006
115
0.663759
755
6,879
5.809272
0.177483
0.05472
0.047196
0.051072
0.817601
0.804378
0.78021
0.78021
0.78021
0.78021
0
0.003121
0.20817
6,879
172
116
39.994186
0.802093
0.102922
0
0.784615
0
0
0.167999
0.123876
0
0
0
0
0.061538
1
0.061538
false
0
0.046154
0
0.123077
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7accded58736a8b95e657e9e3cfdf21158b76e9c
95
py
Python
src/sage/groups/additive_abelian/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
5
2015-01-04T07:15:06.000Z
2022-03-04T15:15:18.000Z
src/sage/groups/additive_abelian/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
null
null
null
src/sage/groups/additive_abelian/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
10
2016-09-28T13:12:40.000Z
2022-02-12T09:28:34.000Z
from additive_abelian_group import AdditiveAbelianGroup
from additive_abelian_wrapper import *
31.666667
55
0.905263
11
95
7.454545
0.636364
0.292683
0.463415
0
0
0
0
0
0
0
0
0
0.084211
95
2
56
47.5
0.942529
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
7ae5212139957b530a4a0daaba4582fec8fbf5d8
15,136
py
Python
restclients/test/util/date_formator.py
uw-it-cte/uw-restclients
2b09348bf066e5508304401f93f281805e965af5
[ "Apache-2.0" ]
null
null
null
restclients/test/util/date_formator.py
uw-it-cte/uw-restclients
2b09348bf066e5508304401f93f281805e965af5
[ "Apache-2.0" ]
null
null
null
restclients/test/util/date_formator.py
uw-it-cte/uw-restclients
2b09348bf066e5508304401f93f281805e965af5
[ "Apache-2.0" ]
null
null
null
from django.test import TestCase
from datetime import date, datetime, timedelta
from restclients.util.date_formator import full_month_date_str
from restclients.util.date_formator import abbr_month_date_time_str
from restclients.util.date_formator import abbr_week_month_day_str
from restclients.util.date_formator import last_midnight, time_str, is_today
from restclients.util.date_formator import is_days_ago, get_past_weeks_count
from restclients.util.date_formator import get_past_months_count
from restclients.util.date_formator import get_past_years_count
from restclients.util.date_formator import past_datetime_str


class FormatorTest(TestCase):

    def test_full_month_date_str(self):
        self.assertEquals(full_month_date_str(date(2014, 7, 4)), 'July 4, 2014')
        self.assertEquals(full_month_date_str(date(2014, 6, 12)), 'June 12, 2014')

    def test_time_str(self):
        self.assertEquals(time_str(datetime(2014, 7, 4, 0, 0)), '12:00 AM')
        self.assertEquals(time_str(datetime(2014, 7, 4, 3, 3)), '3:03 AM')
        self.assertEquals(time_str(datetime(2014, 7, 4, 12, 0)), '12:00 PM')
        self.assertEquals(time_str(datetime(2014, 7, 4, 13, 3)), '1:03 PM')
        self.assertEquals(time_str(datetime(2014, 7, 4, 23, 59)), '11:59 PM')

    def test_abbr_month_date_time_str(self):
        self.assertEquals(abbr_month_date_time_str(datetime(2014, 7, 4, 3, 3)), 'Jul 4, 2014 at 3:03 AM')
        self.assertEquals(abbr_month_date_time_str(datetime(2014, 6, 12, 17, 55)), 'Jun 12, 2014 at 5:55 PM')

    def test_abbr_week_month_day_str(self):
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 7, 4)), 'Fri, Jul 4')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 12)), 'Thu, Jun 12')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 14)), 'Sat, Jun 14')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 15)), 'Sun, Jun 15')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 16)), 'Mon, Jun 16')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 17)), 'Tue, Jun 17')
        self.assertEquals(abbr_week_month_day_str(datetime(2014, 6, 18)), 'Wed, Jun 18')

    def test_last_midnight(self):
        now = datetime.now()
        self.assertEquals(last_midnight(), datetime(now.year, now.month, now.day, 0, 0, 0, 0))

    def test_is_today(self):
        now = datetime.now()
        self.assertTrue(is_today(now))
        self.assertTrue(is_today(last_midnight()))
        self.assertTrue(is_today(last_midnight()+timedelta(seconds=1)))
        self.assertFalse(is_today(last_midnight()-timedelta(seconds=1)))
        self.assertFalse(is_today(now-timedelta(days=1)))

    def test_is_yesterday(self):
        now = datetime.now()
        self.assertFalse(is_days_ago(last_midnight()+timedelta(seconds=1), 1))
        self.assertTrue(is_days_ago(last_midnight(), 1))
        self.assertTrue(is_days_ago(now-timedelta(days=1), 1))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=1)+timedelta(seconds=1), 1))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=1), 1))
        self.assertFalse(is_days_ago(last_midnight()-timedelta(days=1)-timedelta(seconds=1), 1))
        self.assertFalse(is_days_ago(now-timedelta(days=2), 1))

    def test_is_days_ago(self):
        now = datetime.now()
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=1), 2))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=1)-timedelta(seconds=1), 2))
        self.assertTrue(is_days_ago(now-timedelta(days=2), 2))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=2), 2))
        self.assertFalse(is_days_ago(last_midnight()-timedelta(days=2)-timedelta(seconds=1), 2))
        self.assertTrue(is_days_ago(now-timedelta(days=3), 3))
        self.assertTrue(is_days_ago(now-timedelta(days=4), 4))
        self.assertTrue(is_days_ago(now-timedelta(days=5), 5))
        self.assertTrue(is_days_ago(now-timedelta(days=6), 6))

    def test_is_a_week_ago(self):
        now = datetime.now()
        self.assertFalse(is_days_ago(last_midnight()-timedelta(days=6)+timedelta(seconds=1), 7))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=6), 7))
        self.assertTrue(is_days_ago(last_midnight()-timedelta(days=7)+timedelta(seconds=1), 7))
        self.assertTrue(is_days_ago(now-timedelta(weeks=1), 7))
        self.assertTrue(is_days_ago(now-timedelta(days=7), 7))
        self.assertFalse(is_days_ago(last_midnight()-timedelta(days=7)-timedelta(seconds=1), 7))
        self.assertFalse(is_days_ago(now-timedelta(days=8), 7))

    def test_is_over_1_week_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_weeks_count(now-timedelta(days=7)) == 1)
        self.assertFalse(get_past_weeks_count(last_midnight()-timedelta(days=7)+timedelta(seconds=1)) == 1)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=7)) == 1)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=8)) == 1)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=14)) == 1)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=14)+timedelta(seconds=1)) == 1)
        self.assertFalse(get_past_weeks_count(last_midnight()-timedelta(days=14)) == 1)
        self.assertFalse(get_past_weeks_count(now-timedelta(days=15)) == 1)

    def test_is_over_2_weeks_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_weeks_count(now-timedelta(days=14)) == 2)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=14)) == 2)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=15)) == 2)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=21)+timedelta(seconds=1)) == 2)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=21)) == 2)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=21)+timedelta(seconds=1)) == 2)
        self.assertFalse(get_past_weeks_count(last_midnight()-timedelta(days=21)) == 2)
        self.assertFalse(get_past_weeks_count(now-timedelta(days=22)) == 2)

    def test_is_over_3_weeks_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_weeks_count(now-timedelta(days=21)) == 3)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=21)) == 3)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=22)) == 3)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=27)) == 3)
        self.assertTrue(get_past_weeks_count(now-timedelta(days=28)) == 3)
        self.assertTrue(get_past_weeks_count(last_midnight()-timedelta(days=28)+timedelta(seconds=1)) == 3)
        self.assertFalse(get_past_weeks_count(last_midnight()-timedelta(days=28)) == 3)
        self.assertFalse(get_past_weeks_count(now-timedelta(days=29)) == 3)

    def test_is_over_1_month_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(days=28)+timedelta(seconds=1)) == 1)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=28)) == 1)
        self.assertTrue(get_past_months_count(now-timedelta(days=29)) == 1)
        self.assertTrue(get_past_months_count(now-timedelta(days=56)) == 1)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=56)+timedelta(seconds=1)) == 1)
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(days=56)) == 1)
        self.assertFalse(get_past_months_count(now-timedelta(days=57)) == 1)

    def test_is_over_2_months_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(days=56)+timedelta(seconds=1)) == 2)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=56)) == 2)
        self.assertTrue(get_past_months_count(now-timedelta(days=57)) == 2)
        self.assertTrue(get_past_months_count(now-timedelta(days=84)) == 2)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=84)+timedelta(seconds=1)) == 2)
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(weeks=12)) == 2)
        self.assertFalse(get_past_months_count(now-timedelta(days=85)) == 2)

    def test_is_over_3_months_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_months_count(now-timedelta(days=84)) == 3)
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(days=84)+timedelta(seconds=1)) == 3)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=84)) == 3)
        self.assertTrue(get_past_months_count(now-timedelta(days=85)) == 3)
        self.assertTrue(get_past_months_count(now-timedelta(days=112)) == 3)
        self.assertTrue(get_past_months_count(last_midnight()-timedelta(days=112)+timedelta(seconds=1)) == 3)
        self.assertFalse(get_past_months_count(last_midnight()-timedelta(days=112)) == 3)
        self.assertFalse(get_past_months_count(now-timedelta(days=113)) == 3)

    def test_is_over_1_year_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_years_count(now-timedelta(days=365)) == 1)
        self.assertTrue(get_past_years_count(last_midnight()-timedelta(days=365)) == 1)
        self.assertTrue(get_past_years_count(now-timedelta(days=366)) == 1)
        self.assertTrue(get_past_years_count(now-timedelta(days=365*2)) == 1)
        self.assertTrue(get_past_years_count(last_midnight()-timedelta(days=365*2)) == 2)
        self.assertFalse(get_past_years_count(now-timedelta(days=731)) == 1)

    def test_is_over_2_years_ago(self):
        now = datetime.now()
        self.assertFalse(get_past_years_count(now-timedelta(days=365*2)) == 2)
        self.assertTrue(get_past_years_count(last_midnight()-timedelta(days=365*2)) == 2)
        self.assertTrue(get_past_years_count(now-timedelta(days=731)) == 2)
        self.assertTrue(get_past_years_count(now-timedelta(days=365*3)) == 2)
        self.assertFalse(get_past_years_count(last_midnight()-timedelta(days=365*3)) == 2)
        self.assertFalse(get_past_years_count(now-timedelta(days=1096)) == 2)

    def test_past_datetime_str_today(self):
        now = datetime.now()
        t1 = datetime(now.year, now.month, now.day, 0, 0, 0)
        self.assertEquals(past_datetime_str(t1), 'today at 12:00 AM')
        t1 = datetime(now.year, now.month, now.day, 1)
        self.assertEquals(past_datetime_str(t1), 'today at 1:00 AM')
        t1 = datetime(now.year, now.month, now.day, 11)
        self.assertEquals(past_datetime_str(t1), 'today at 11:00 AM')
        t1 = datetime(now.year, now.month, now.day, 12)
        self.assertEquals(past_datetime_str(t1), 'today at 12:00 PM')
        t1 = datetime(now.year, now.month, now.day, 13)
        self.assertEquals(past_datetime_str(t1), 'today at 1:00 PM')

    def test_past_datetime_str_yesterday(self):
        day = datetime.now() - timedelta(days=1)
        t2 = datetime(day.year, day.month, day.day, 0, 0, 0)
        self.assertEquals(past_datetime_str(t2), 'yesterday at 12:00 AM')
        t2 = datetime(day.year, day.month, day.day, 1)
        self.assertEquals(past_datetime_str(t2), 'yesterday at 1:00 AM')

    def test_past_datetime_str_days_ago(self):
        day = datetime.now() - timedelta(days=2)
        self.assertEquals(past_datetime_str(day), '2 days ago')
        day = datetime.now() - timedelta(days=3)
        self.assertEquals(past_datetime_str(day), '3 days ago')
        day = datetime.now() - timedelta(days=4)
        self.assertEquals(past_datetime_str(day), '4 days ago')
        day = datetime.now() - timedelta(days=5)
        self.assertEquals(past_datetime_str(day), '5 days ago')
        day = datetime.now() - timedelta(days=6)
        self.assertEquals(past_datetime_str(day), '6 days ago')

    def test_past_datetime_str_a_week_ago(self):
        day = datetime.now() - timedelta(days=7)
        self.assertEquals(past_datetime_str(day), '1 week ago')

    def test_past_datetime_str_over_weeks_ago(self):
        day = datetime.now() - timedelta(days=8)
        self.assertEquals(past_datetime_str(day), 'over 1 week ago')
        day = datetime.now() - timedelta(days=14)
        self.assertEquals(past_datetime_str(day), 'over 1 week ago')
        day = last_midnight() - timedelta(days=14)
        self.assertEquals(past_datetime_str(day), 'over 2 weeks ago')
        day = datetime.now() - timedelta(days=15)
        self.assertEquals(past_datetime_str(day), 'over 2 weeks ago')
        day = datetime.now() - timedelta(days=21)
        self.assertEquals(past_datetime_str(day), 'over 2 weeks ago')
        day = last_midnight() - timedelta(days=21)
        self.assertEquals(past_datetime_str(day), 'over 3 weeks ago')
        day = datetime.now() - timedelta(days=22)
        self.assertEquals(past_datetime_str(day), 'over 3 weeks ago')
        day = datetime.now() - timedelta(days=28)
        self.assertEquals(past_datetime_str(day), 'over 3 weeks ago')

    def test_past_datetime_str_over_months_ago(self):
        day = datetime.now() - timedelta(days=29)
        self.assertEquals(past_datetime_str(day), 'over 1 month ago')
        day = datetime.now() - timedelta(days=56)
        self.assertEquals(past_datetime_str(day), 'over 1 month ago')
        day = datetime.now() - timedelta(days=57)
        self.assertEquals(past_datetime_str(day), 'over 2 months ago')
        day = datetime.now() - timedelta(days=84)
        self.assertEquals(past_datetime_str(day), 'over 2 months ago')
        day = datetime.now() - timedelta(days=85)
        self.assertEquals(past_datetime_str(day), 'over 3 months ago')
        day = datetime.now() - timedelta(days=112)
        self.assertEquals(past_datetime_str(day), 'over 3 months ago')

    def test_past_datetime_str_over_years_ago(self):
        day = datetime.now() - timedelta(days=366)
        self.assertEquals(past_datetime_str(day), 'over 1 year ago')
        day = datetime.now() - timedelta(days=730)
        self.assertEquals(past_datetime_str(day), 'over 1 year ago')
        day = datetime.now() - timedelta(days=731)
        self.assertEquals(past_datetime_str(day), 'over 2 years ago')
        day = datetime.now() - timedelta(days=1095)
        self.assertEquals(past_datetime_str(day), 'over 2 years ago')
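The is_days_ago asserts above pin down a closed-interval boundary rule. A hedged sketch of that reading (the real implementation lives in restclients/util/date_formator.py and may differ):

from datetime import timedelta

def is_days_ago_sketch(dt, n, last_midnight):
    # "n days ago" == the closed interval
    # [last_midnight - n days, last_midnight - (n - 1) days],
    # which matches every boundary case asserted in the tests.
    lower = last_midnight - timedelta(days=n)
    upper = last_midnight - timedelta(days=n - 1)
    return lower <= dt <= upper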
47.152648
88
0.661535
2,094
15,136
4.532951
0.047278
0.141066
0.10788
0.102718
0.931732
0.905499
0.84724
0.788348
0.711336
0.602823
0
0.048278
0.207651
15,136
320
89
47.3
0.743184
0
0
0.276515
0
0
0.04334
0
0
0
0
0
0.507576
1
0.090909
false
0
0.037879
0
0.132576
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
7