hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f0ae3a72599834b4479605760b0a52f2f487933
| 3,086
|
py
|
Python
|
m04_networking/napalm_config_example.py
|
Brunner-Kibali/test-python-52
|
52ef44471ee003536ce002bb22ea654789dc8489
|
[
"MIT"
] | null | null | null |
m04_networking/napalm_config_example.py
|
Brunner-Kibali/test-python-52
|
52ef44471ee003536ce002bb22ea654789dc8489
|
[
"MIT"
] | null | null | null |
m04_networking/napalm_config_example.py
|
Brunner-Kibali/test-python-52
|
52ef44471ee003536ce002bb22ea654789dc8489
|
[
"MIT"
] | null | null | null |
import napalm
import filecmp
import difflib
import sys

print("\n----- connecting to device, comparing configs ----------")

# Obtain the NAPALM driver class for classic Cisco IOS.
ios_driver = napalm.get_network_driver('ios')

# The context manager opens the SSH session and closes it again on exit.
with ios_driver(hostname='192.168.254.200',
                username='cisco',
                password='cisco') as device:
    # Stage the saved config file as a merge candidate, then print what
    # would change on the device without actually committing anything.
    device.load_merge_candidate(filename="cisco.ios.running.CML-iosv-0.config")
    diff = device.compare_config()
    print("----- DIFF ------------\n", diff)

# Earlier commented-out experiments (diffing running vs. standard configs with
# filecmp/difflib against the DevNet IOS-XE and NX-OS sandboxes) were removed
# as dead code; the filecmp/difflib/sys imports above are retained unchanged.
| 39.063291
| 136
| 0.643552
| 357
| 3,086
| 5.414566
| 0.201681
| 0.073978
| 0.051733
| 0.043973
| 0.837558
| 0.798241
| 0.798241
| 0.754268
| 0.706674
| 0.655975
| 0
| 0.012608
| 0.177576
| 3,086
| 78
| 137
| 39.564103
| 0.749015
| 0.801361
| 0
| 0
| 0
| 0
| 0.267399
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.083333
| 0.333333
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 9
|
a028a05916b6b99e1f0340e115a106e0acf3e7e1
| 81
|
py
|
Python
|
tests/bind/good/break-in-while2.py
|
Nakrez/RePy
|
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
|
[
"MIT"
] | 1
|
2020-11-24T05:24:26.000Z
|
2020-11-24T05:24:26.000Z
|
tests/bind/good/break-in-while2.py
|
Nakrez/RePy
|
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
|
[
"MIT"
] | null | null | null |
tests/bind/good/break-in-while2.py
|
Nakrez/RePy
|
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
|
[
"MIT"
] | null | null | null |
# Compiler test fixture (RePy "bind" suite, tests/bind/good/): exercises
# `break` binding inside three nested `while` loops. Each `break` exits
# exactly one loop level, so the script terminates immediately. The exact
# token sequence (`while 1`, one break per level) is presumably what the
# parser/binder test checks — do not "modernize" it.
while 1:
    while 1:
        while 1:
            break
        break
    break
| 11.571429
| 17
| 0.407407
| 9
| 81
| 3.666667
| 0.333333
| 0.545455
| 0.666667
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.555556
| 81
| 6
| 18
| 13.5
| 0.833333
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
261687e910e133459402ad1f569fee8c27e7ea89
| 20,669
|
py
|
Python
|
old/src/callbacks.py
|
eshimizu335/PenguinViewer
|
f86db138605e8e28b0ac89d1229fb088e18c8cf2
|
[
"MIT"
] | 1
|
2021-03-05T02:19:05.000Z
|
2021-03-05T02:19:05.000Z
|
old/src/callbacks.py
|
eshimizu335/PenguinViewer
|
f86db138605e8e28b0ac89d1229fb088e18c8cf2
|
[
"MIT"
] | null | null | null |
old/src/callbacks.py
|
eshimizu335/PenguinViewer
|
f86db138605e8e28b0ac89d1229fb088e18c8cf2
|
[
"MIT"
] | null | null | null |
import dash
from dash.dependencies import Input, Output
from app import app
from layouts import default_stylesheet, universe_stylesheet, flower_stylesheet
from read import check_model
import table
# Callback for the physical diagram: shows data for the hovered node and
# switches the stylesheet theme from the dropdown.
@app.callback(
    Output('graph_p', 'stylesheet'),  # target: the physical graph's stylesheet attribute
    Output('html_p', 'style'),  # target: the physical page's style attribute
    Output('left_p', 'style'),
    Output('right_p', 'style'),
    [Input('graph_p', 'mouseoverNodeData')],  # data dict of the hovered node
    [Input('graph_p', 'tapEdgeData')],  # data dict of the clicked edge
    [Input('graph_p', 'tapNodeData')],  # data dict of the clicked node
    [Input('theme_dropdown', 'value')],  # selected value of the theme dropdown
)
def update_graph_p(node_data_dict, edge_data_dict, clicked_node_dict, theme):  # switch the graph theme
    """Restyle the physical graph and page panes from hover/click/theme input.

    Returns a 4-tuple (stylesheet, page style, left-pane style,
    right-pane style) matching the four declared Outputs.

    NOTE(review): several hover+edge branches fall through without a return
    (implicit None) when no source port matches the clicked edge — confirm
    whether those paths should raise PreventUpdate instead.
    """
    # Styles for the selected theme.
    if theme == 'universe':
        selected_stylesheet = universe_stylesheet
        wstyle = {'backgroundColor': 'black'}  # background for the whole page
    elif theme == 'flower':
        selected_stylesheet = flower_stylesheet
        wstyle = {'backgroundColor': '#f5ecf4'}
    else:
        selected_stylesheet = default_stylesheet
        wstyle = {'backgroundColor': '#D7EEFF'}
    if node_data_dict:  # a node is being hovered
        if node_data_dict['model'] == 'L3switch':  # hovered node is an L3 switch
            # Change the node border and show its model name as a label.
            style_with_label = {'selector': '#' + node_data_dict['id'],
                                'style': {
                                    'label': check_model(node_data_dict['id']),
                                    'border-width': 5,
                                    'border-style': 'double',
                                    'border-color': '#8b008b',
                                    'border-opacity': 0.9}}
        else:  # hovered node is an L2 switch
            style_with_label = {'selector': '#' + node_data_dict['id'],
                                'style': {
                                    'label': check_model(node_data_dict['id']),
                                    'border-width': 5,
                                    'border-style': 'double',
                                    'border-color': '#da70d6',
                                    'border-opacity': 0.9}}
        if clicked_node_dict:  # a node was clicked
            left_style = {'width': '70%', 'overflow': 'auto'}
            right_style = {'width': '30%', 'overflow': 'auto'}
            if clicked_node_dict['model'] == 'L3switch':  # the clicked node is L3
                # Recolor the clicked node.
                style_node_clicked = {'selector': '#' + clicked_node_dict['id'],
                                      'style': {
                                          'backgroundColor': '#8b008b'}}
            else:  # the clicked node is L2
                # Recolor the clicked node.
                style_node_clicked = {'selector': '#' + clicked_node_dict['id'],
                                      'style': {
                                          'backgroundColor': '#da70d6'}}
            if edge_data_dict:  # an edge was clicked
                source_info_list = table.format_data_interface(edge_data_dict['source'])  # interface info of the edge's source
                for source_info in source_info_list:
                    # If this row is the same interface as srcport, classify
                    # the cable type from the `sh interfaces status` output.
                    # NOTE(review): unlike the later branches, srcport is
                    # compared here WITHOUT .replace(' ', '') — confirm.
                    if source_info['Port'] == edge_data_dict['srcport']:
                        if source_info['Type'] == '10/100BaseTX':
                            cable = 'UTP-1GB'
                        elif source_info['Type'] == '10GBASE-T':
                            cable = 'UTP-10GB'
                        elif source_info['Type'] == '1000BASE-SX':
                            cable = '光マルチ-1GB'
                        elif source_info['Type'] == '1000BASE-LX':
                            cable = '光シングル-1GB'
                        elif source_info['Type'] == '10GBASE-SR':
                            cable = '光マルチ-10GB'
                        elif source_info['Type'] == '10GBASE-LR':
                            cable = '光シングル-10GB'
                        else:
                            cable = 'ケーブル不明'
                        # Thicken the edge and label it with the cable info.
                        style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                            'style': {
                                                'width': 10,
                                                'label': cable,
                                                'text-background-color': 'white',
                                                'text-background-opacity': 0.9,
                                                'text-background-shape': 'rectangle',
                                                'text-background-padding': '3px',
                                                'font-size': 20}}
                        updated_stylesheet = selected_stylesheet + [style_with_label] + [style_node_clicked] + [
                            style_edge_label]
                        return updated_stylesheet, wstyle, left_style, right_style
                    else:
                        # No match: keep scanning; if nothing matches, control
                        # falls through with an implicit None return.
                        pass
            else:  # no edge clicked
                updated_stylesheet = selected_stylesheet + [style_with_label] + [style_node_clicked]
                return updated_stylesheet, wstyle, left_style, right_style
        else:  # hovered, but no node clicked
            left_style = {'width': '100%'}
            right_style = {'width': '0%'}
            if edge_data_dict:  # an edge was clicked
                source_info_list = table.format_data_interface(edge_data_dict['source'])  # interface info of the edge's source
                for source_info in source_info_list:
                    # Match the source port (cdp output contains spaces in
                    # interface names, so strip them), then classify the cable
                    # type from the `sh interfaces status` output.
                    if source_info['Port'] == edge_data_dict['srcport'].replace(' ', ''):
                        if source_info['Type'] == '10/100BaseTX':
                            cable = 'UTP-1GB'
                        elif source_info['Type'] == '10GBASE-T':
                            cable = 'UTP-10GB'
                        elif source_info['Type'] == '1000BASE-SX':
                            cable = '光マルチ-1GB'
                        elif source_info['Type'] == '1000BASE-LX':
                            cable = '光シングル-1GB'
                        elif source_info['Type'] == '10GBASE-SR':
                            cable = '光マルチ-10GB'
                        elif source_info['Type'] == '10GBASE-LR':
                            cable = '光シングル-10GB'
                        else:
                            cable = 'ケーブル不明'
                        # Thicken the edge and label it with the cable info.
                        style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                            'style': {
                                                'width': 10,
                                                'label': cable,
                                                'text-background-color': 'white',
                                                'text-background-opacity': 0.9,
                                                'text-background-shape': 'rectangle',
                                                'text-background-padding': '3px',
                                                'font-size': 20}}
                        updated_stylesheet = selected_stylesheet + [style_with_label] + [style_edge_label]
                        return updated_stylesheet, wstyle, left_style, right_style
            else:  # no edge clicked
                updated_stylesheet = selected_stylesheet + [style_with_label]
                return updated_stylesheet, wstyle, left_style, right_style
    else:  # no node is being hovered
        left_style = {'width': '100%'}
        right_style = {'width': '0%'}
        if edge_data_dict:  # an edge was clicked
            source_info_list = table.format_data_interface(edge_data_dict['source'])  # interface info of the edge's source
            for source_info in source_info_list:
                # Match the source port (spaces stripped, see above), then
                # classify the cable type from `sh interfaces status` output.
                if source_info['Port'] == edge_data_dict['srcport'].replace(' ', ''):
                    if source_info['Type'] == '10/100BaseTX':
                        cable = 'UTP-1GB'
                    elif source_info['Type'] == '10GBASE-T':
                        cable = 'UTP-10GB'
                    elif source_info['Type'] == '1000BASE-SX':
                        cable = '光マルチ-1GB'
                    elif source_info['Type'] == '1000BASE-LX':
                        cable = '光シングル-1GB'
                    elif source_info['Type'] == '10GBASE-SR':
                        cable = '光マルチ-10GB'
                    elif source_info['Type'] == '10GBASE-LR':
                        cable = '光シングル-10GB'
                    else:
                        cable = 'ケーブル不明'
                    # Thicken the edge and label it with the cable info.
                    style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                        'style': {
                                            'width': 10,
                                            'label': cable,
                                            'text-background-color': 'white',
                                            'text-background-opacity': 0.9,
                                            'text-background-shape': 'rectangle',
                                            'text-background-padding': '3px',
                                            'font-size': 20}}
                    edge_updated_stylesheet = selected_stylesheet + [style_edge_label]
                    return edge_updated_stylesheet, wstyle, left_style, right_style
                else:
                    # No match: keep scanning; implicit None if none match.
                    pass
        else:  # nothing happened at all
            return selected_stylesheet, wstyle, left_style, right_style
# Callback for the logical diagram: shows data for the hovered node and
# switches the stylesheet theme from the dropdown.
@app.callback(
    Output('graph_l', 'stylesheet'),
    Output('html_l', 'style'),
    Output('left_l', 'style'),
    Output('right_l', 'style'),
    [Input('graph_l', 'mouseoverNodeData')],  # data dict of the hovered node
    [Input('graph_l', 'tapEdgeData')],  # data dict of the clicked edge
    [Input('graph_l', 'tapNodeData')],  # data dict of the clicked node
    [Input('theme_dropdown', 'value')],
)
def update_graph_l(node_data_dict, edge_data_dict, clicked_node_dict, theme):  # switch graph theme and page style
    """Restyle the logical graph and page panes from hover/click/theme input.

    Returns a 4-tuple (stylesheet, page style, left-pane style,
    right-pane style) matching the four declared Outputs.
    """
    # Styles for the selected theme.
    # BUG FIX: condition was `theme == 'universe' == 'universe'`; the chained
    # second comparison was a redundant typo with no effect — removed.
    if theme == 'universe':
        selected_stylesheet = universe_stylesheet
        wstyle = {'backgroundColor': 'black'}
    elif theme == 'flower':
        selected_stylesheet = flower_stylesheet
        wstyle = {'backgroundColor': '#f5ecf4'}
    else:
        selected_stylesheet = default_stylesheet
        wstyle = {'backgroundColor': '#D7EEFF'}
    if node_data_dict:  # a node is being hovered
        if node_data_dict['model'] == 'L3switch':  # hovered node is an L3 switch
            # Change the node border and show its model name as a label.
            style_with_label = {'selector': '#' + node_data_dict['id'],
                                'style': {
                                    'label': check_model(node_data_dict['id']),
                                    'border-width': 5,
                                    'border-style': 'double',
                                    'border-color': '#8b008b',
                                    'border-opacity': 0.9}}
        else:  # hovered node is an L2 switch
            style_with_label = {'selector': '#' + node_data_dict['id'],
                                'style': {
                                    'label': check_model(node_data_dict['id']),
                                    'border-width': 5,
                                    'border-style': 'double',
                                    'border-color': '#da70d6',
                                    'border-opacity': 0.9}}
        if clicked_node_dict:  # a node was clicked
            left_style = {'width': '70%', 'overflow': 'auto'}
            right_style = {'width': '30%', 'overflow': 'auto'}
            if clicked_node_dict['model'] == 'L3switch':  # the clicked node is L3
                # Recolor the clicked node.
                style_node_clicked = {'selector': '#' + clicked_node_dict['id'],
                                      'style': {
                                          'backgroundColor': '#8b008b'}}
            else:  # the clicked node is L2
                # Recolor the clicked node.
                style_node_clicked = {'selector': '#' + clicked_node_dict['id'],
                                      'style': {
                                          'backgroundColor': '#da70d6'}}
            if edge_data_dict:  # an edge was clicked
                source_info_list = table.format_data_interface(
                    edge_data_dict['source'])  # interface info of the edge's source
                for source_info in source_info_list:
                    # Match the source port (cdp output contains spaces in
                    # interface names, so strip them before comparing).
                    if source_info['Port'] == edge_data_dict['srcport'].replace(' ', ''):
                        # Thicken the edge and label it with the VLAN info.
                        style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                            'style': {
                                                'width': 10,
                                                'label': 'Vlan' + source_info['Vlan'],
                                                'text-background-color': 'white',
                                                'text-background-opacity': 0.9,
                                                'text-background-shape': 'rectangle',
                                                'text-background-padding': '3px',
                                                'font-size': 20}}
                        updated_stylesheet = selected_stylesheet + [style_with_label] + [style_node_clicked] + [
                            style_edge_label]
                        return updated_stylesheet, wstyle, left_style, right_style
                    else:
                        pass
                # NOTE(review): implicit None return when no port matches —
                # confirm whether this should raise PreventUpdate instead.
            else:  # no edge clicked
                updated_stylesheet = selected_stylesheet + [style_with_label] + [style_node_clicked]
                return updated_stylesheet, wstyle, left_style, right_style
        else:  # hovered, but no node clicked
            left_style = {'width': '100%'}
            right_style = {'width': '0%'}
            if edge_data_dict:  # an edge was clicked
                source_info_list = table.format_data_interface(
                    edge_data_dict['source'])  # interface info of the edge's source
                for source_info in source_info_list:
                    # Match the source port (spaces stripped, see above).
                    if source_info['Port'] == edge_data_dict['srcport'].replace(' ', ''):
                        # Thicken the edge and label it with the VLAN info.
                        style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                            'style': {
                                                'width': 10,
                                                'label': 'Vlan' + source_info['Vlan'],
                                                'text-background-color': 'white',
                                                'text-background-opacity': 0.9,
                                                'text-background-shape': 'rectangle',
                                                'text-background-padding': '3px',
                                                'font-size': 20}}
                        updated_stylesheet = selected_stylesheet + [style_with_label] + [style_edge_label]
                        # BUG FIX: this return only yielded (stylesheet, wstyle),
                        # short-changing the four declared Outputs; it now
                        # returns all four values like every other branch.
                        return updated_stylesheet, wstyle, left_style, right_style
            else:  # no edge clicked
                updated_stylesheet = selected_stylesheet + [style_with_label]
                return updated_stylesheet, wstyle, left_style, right_style
    else:  # no node is being hovered
        left_style = {'width': '100%'}
        right_style = {'width': '0%'}
        if edge_data_dict:  # an edge was clicked
            source_info_list = table.format_data_interface(edge_data_dict['source'])  # source interface info
            for source_info in source_info_list:
                # Match the source port (spaces stripped, see above).
                if source_info['Port'] == edge_data_dict['srcport'].replace(' ', ''):
                    # Thicken the edge and label it with the VLAN info.
                    style_edge_label = {'selector': '#' + edge_data_dict['id'],
                                        'style': {
                                            'width': 10,
                                            'label': 'Vlan' + source_info['Vlan'],
                                            'text-background-color': 'white',
                                            'text-background-opacity': 0.9,
                                            'text-background-shape': 'rectangle',
                                            'text-background-padding': '3px',
                                            'font-size': 20}}
                    edge_updated_stylesheet = selected_stylesheet + [style_edge_label]
                    return edge_updated_stylesheet, wstyle, left_style, right_style
                else:
                    pass
            # NOTE(review): implicit None return when no port matches.
        else:  # nothing happened at all
            return selected_stylesheet, wstyle, left_style, right_style
# Callback that renders command-output tables for the node clicked on the
# physical diagram.
@app.callback(
    Output('table_p', 'children'),  # target: the right-hand table area
    Output('table_title_p', 'children'),
    Output('command_dropdown_p', 'style'),
    [Input('graph_p', 'tapNodeData')],  # data dict of the clicked node
    [Input('command_dropdown_p', 'value')]  # command chosen in the dropdown
)
def show_command_dropdown(clicked_node_physical, command):
    """Show the selected command's table for the clicked physical-view node."""
    if not clicked_node_physical:
        # No node clicked yet: stop the callback from updating anything.
        raise dash.exceptions.PreventUpdate
    node_id = clicked_node_physical['id']  # node name shown atop the table area
    # Map each dropdown command to its (data formatter, table builder) pair.
    builders = {
        'port': (table.format_data_interface, table.make_table_interface),
        'mac': (table.format_data_mac, table.make_table_mac),
        'route': (table.format_data_iproute, table.make_table_iproute),
        'ip_int': (table.format_data_ip_int, table.make_table_ip_int),
    }
    # Unknown/unset commands fall back to the interface-status table,
    # exactly as the original if/elif chain's else branch did.
    formatter, builder = builders.get(
        command, (table.format_data_interface, table.make_table_interface))
    table_data = builder(formatter(node_id))
    # Return the table, its title, and a visible dropdown style.
    return table_data, node_id, {'visibility': 'visible'}
# Callback that renders command-output tables for the node clicked on the
# logical diagram.
@app.callback(
    Output('table_l', 'children'),  # target: the right-hand table area
    Output('table_title_l', 'children'),
    Output('command_dropdown_l', 'style'),
    [Input('graph_l', 'tapNodeData')],  # data dict of the clicked node
    [Input('command_dropdown_l', 'value')]  # command chosen in the dropdown
)
def show_command_dropdown(clicked_node_logical, command):
    """Show the selected command's table for the clicked logical-view node."""
    # NOTE(review): this reuses the name of the physical-view callback defined
    # earlier in the file; Dash registers both at decoration time, but the
    # module-level name now points here — consider distinct function names.
    if not clicked_node_logical:
        # Nothing clicked yet: suppress the callback update.
        raise dash.exceptions.PreventUpdate
    node_name = clicked_node_logical['id']  # shown as the table-area title
    dropdown_style = {'visibility': 'visible'}
    if command == 'mac':
        data = table.make_table_mac(table.format_data_mac(node_name))
    elif command == 'route':
        data = table.make_table_iproute(table.format_data_iproute(node_name))
    elif command == 'ip_int':
        data = table.make_table_ip_int(table.format_data_ip_int(node_name))
    else:
        # 'port' and any unknown value both show the interface-status table,
        # matching the original chain's explicit 'port' case plus its else.
        data = table.make_table_interface(table.format_data_interface(node_name))
    return data, node_name, dropdown_style
| 55.412869
| 112
| 0.497847
| 1,672
| 20,669
| 5.864833
| 0.107057
| 0.04589
| 0.031817
| 0.027534
| 0.896492
| 0.864063
| 0.838874
| 0.838874
| 0.81236
| 0.81236
| 0
| 0.016722
| 0.398181
| 20,669
| 372
| 113
| 55.561828
| 0.771605
| 0.108085
| 0
| 0.851852
| 0
| 0
| 0.153339
| 0.028782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012346
| false
| 0.012346
| 0.018519
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
269471f5e68b928ef3dc41c9e98dee5b585f9796
| 182
|
py
|
Python
|
ignite/_utils.py
|
Juddd/ignite
|
00a208a4e7a7783e9ddac18931085fca2f0dec47
|
[
"BSD-3-Clause"
] | 4,119
|
2017-11-23T18:10:37.000Z
|
2022-03-31T05:31:27.000Z
|
ignite/_utils.py
|
Juddd/ignite
|
00a208a4e7a7783e9ddac18931085fca2f0dec47
|
[
"BSD-3-Clause"
] | 1,838
|
2017-11-24T11:19:25.000Z
|
2022-03-31T09:08:18.000Z
|
ignite/_utils.py
|
Juddd/ignite
|
00a208a4e7a7783e9ddac18931085fca2f0dec47
|
[
"BSD-3-Clause"
] | 691
|
2017-11-24T10:57:33.000Z
|
2022-03-29T02:19:44.000Z
|
# For compatibility
# Thin re-export shim: the helpers live in ignite.utils; importing them here
# keeps older code that used `ignite._utils` working unchanged.
from ignite.utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot
# Declare the public API explicitly so `from ignite._utils import *` exports
# exactly the four re-exported helpers.
__all__ = ["apply_to_tensor", "apply_to_type", "convert_tensor", "to_onehot"]
| 36.4
| 82
| 0.802198
| 27
| 182
| 4.814815
| 0.481481
| 0.215385
| 0.2
| 0.276923
| 0.692308
| 0.692308
| 0.692308
| 0.692308
| 0.692308
| 0.692308
| 0
| 0
| 0.093407
| 182
| 4
| 83
| 45.5
| 0.787879
| 0.093407
| 0
| 0
| 0
| 0
| 0.312883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
cd685172eb0064c251a65900c840dd7dacadfe14
| 8,593
|
py
|
Python
|
sync_binlog/analysis_rows.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | 3
|
2018-09-18T03:29:33.000Z
|
2020-01-13T03:34:39.000Z
|
sync_binlog/analysis_rows.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | null | null | null |
sync_binlog/analysis_rows.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | 1
|
2022-01-25T09:39:17.000Z
|
2022-01-25T09:39:17.000Z
|
# encoding=utf8
from sync_conf import encryption_column, encryption_db_column
from sync_binlog.AES_Encryption import ase_encryption
from decimal import Decimal
import datetime
def update_before_values(values, table_map=None):
return_values = ''
if encryption_column is False:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ' and ')
else:
table_name = str(table_map).replace('`', '').split('.')[1]
if table_name in encryption_db_column["table_column_map"]:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
if x in encryption_db_column["table_column_map"][table_name].replace(' ', '').split(','):
equal = "`%s` = '%s'" % (x, ase_encryption(v))
return_values += (equal + ' and ')
else:
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ' and ')
else:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ' and ')
return return_values.strip('and ')
def update_after_values(values, table_map=None):
return_values = ''
if encryption_column is False:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ', ')
else:
table_name = str(table_map).replace('`', '').split('.')[1]
if table_name in encryption_db_column["table_column_map"]:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
if x in encryption_db_column["table_column_map"][table_name].replace(' ', '').split(','):
equal = "`%s` = '%s'" % (x, ase_encryption(v))
return_values += (equal + ', ')
else:
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ', ')
else:
for x in values:
if type(values[x]) == bytes:
v = values[x].decode("utf-8")
else:
v = str(values[x])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
equal = "`%s` = '%s'" % (x, v)
return_values += (equal + ', ')
return return_values.strip(', ')
def insert_key_values(values, table_map=None):
k_value = ''
v_value = ''
if encryption_column is False:
for k in values:
k_value += ('`%s`,' % k)
if type(values[k]) == bytes:
v = values[k].decode("utf-8")
else:
v = str(values[k])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
v_value += ("'%s'" % str(v)+',')
else:
table_name = str(table_map).replace('`', '').split('.')[1]
if table_name in encryption_db_column["table_column_map"]:
for k in values: # {'id': 2, 'name': 'b'}
if k in encryption_db_column["table_column_map"][table_name].replace(' ', '').split(','):
k_value += ('`%s`,' % k)
if type(values[k]) == bytes:
v = values[k].decode("utf-8")
else:
v = str(values[k])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
v_value += ("'%s'" % ase_encryption(v)+',')
else:
k_value += ('`%s`,' % k)
if type(values[k]) == bytes:
v = values[k].decode("utf-8")
else:
v = str(values[k])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
v_value += ("'%s'" % v + ',')
else:
for k in values:
k_value += ('`%s`,' % k)
if type(values[k]) == bytes:
v = values[k].decode("utf-8")
else:
v = str(values[k])
if "\\" in v:
v = v.replace("\\", "\\" * 2)
if "\n" in v:
v = v.replace('\n', '\\n')
if "'" in v:
v = v.replace("'", "\\'")
v_value += ("'%s'" % v + ',')
return k_value.strip(','), v_value.strip(',')
def delete_rows_values(values, table_map=None):
    """Build the WHERE clause of a DELETE statement from *values*.

    Args:
        values: dict mapping column name -> value; values are decoded from
            bytes (UTF-8) and escaped for use inside single-quoted SQL
            literals.
        table_map: schema-qualified table name; only consulted when column
            encryption is enabled.

    Returns:
        Conditions joined with ' and ', e.g. "`id` = '2' and `name` = 'b'".

    NOTE: relies on the module-level ``encryption_column``,
    ``encryption_db_column`` and ``ase_encryption`` names.
    """
    def _escape(raw):
        # Decode bytes and escape backslash / newline / single quote.
        # Bug fix: the branch for tables without encrypted columns
        # previously skipped the bytes decoding step.
        v = raw.decode("utf-8") if isinstance(raw, bytes) else str(raw)
        v = v.replace("\\", "\\" * 2)
        v = v.replace('\n', '\\n')
        v = v.replace("'", "\\'")
        return v

    k_value = ''
    if encryption_column is False:
        for k in values:
            k_value += ("`%s` = '%s'" % (k, _escape(values[k])) + ' and ')
    else:
        table_name = str(table_map).replace('`', '').split('.')[1]
        if table_name in encryption_db_column["table_column_map"]:
            encrypted = encryption_db_column["table_column_map"][table_name].replace(' ', '').split(',')
            for k in values:
                v = _escape(values[k])
                if k in encrypted:
                    v = ase_encryption(v)
                k_value += ("`%s` = '%s'" % (k, v) + ' and ')
        else:
            for k in values:
                k_value += ("`%s` = '%s'" % (k, _escape(values[k])) + ' and ')
    # str.strip(' and ') strips a *character set*; kept for compatibility
    # with callers — the trailing separator always ends with " and ".
    return k_value.strip(' and ')
| 38.533632
| 105
| 0.349587
| 917
| 8,593
| 3.161396
| 0.059978
| 0.053812
| 0.053812
| 0.067265
| 0.878579
| 0.87099
| 0.869955
| 0.850983
| 0.843049
| 0.831321
| 0
| 0.006729
| 0.463866
| 8,593
| 222
| 106
| 38.707207
| 0.622531
| 0.004189
| 0
| 0.919811
| 0
| 0
| 0.078443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.018868
| 0
| 0.056604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cd9a0dfe02d89a023daced66c0ba088f69df9d0e
| 42,237
|
py
|
Python
|
structured/Tools/OptionsUtilities/GenerateOptions.py
|
PabloIbannez/UAMMD-structured
|
897d7211c3d37123976a03bc5ffa545495d673cd
|
[
"MIT"
] | null | null | null |
structured/Tools/OptionsUtilities/GenerateOptions.py
|
PabloIbannez/UAMMD-structured
|
897d7211c3d37123976a03bc5ffa545495d673cd
|
[
"MIT"
] | null | null | null |
structured/Tools/OptionsUtilities/GenerateOptions.py
|
PabloIbannez/UAMMD-structured
|
897d7211c3d37123976a03bc5ffa545495d673cd
|
[
"MIT"
] | null | null | null |
# Base options template shared by every simulation type: step counts,
# output settings, box size, thermostat/integrator and neighbour-list
# parameters.  Placeholders are filled with str.format().
optGeneric_template = '''
nSteps {nSteps}
nStepsInfoInterval {nStepsInterval}
nStepsWriteInterval {nStepsWrite}
nStepsBackupInterval {nStepsBackupInterval}
outPutFilePath {outPutFilePath}
outPutFormat {outPutFormat}
boxSize {boxX} {boxY} {boxZ}
T {temperature}
h {steepestDecentScale}
nStepsSteepestDescent {nStepsSteepestDescent}
nStepsSteepestDescentProgressInterval {nStepsSteepestDescentProgressInterval}
maxObjectiveForce {maxObjectiveForce}
dt {timeStep}
frictionConstant {frictionConstant}
cutOffDst {cutOffDst}
VerletListDst {VerletListDst}
inputCoordPath {inputCoordFile}
inputTopologyPath {inputTopFile}'''
# Generic template plus electrostatics parameters.
optGenericWithElec_template = optGeneric_template+'''
dielectricConstant {dielectricConstant}
debyeLength {debyeLength}
'''
# Electrostatics template plus a Lennard-Jones-like surface.
optGenericWithSurface_template = optGenericWithElec_template+'''
epsilonSurf {epsilonSurf}
sigmaSurf {sigmaSurf}
surfacePosition {surfacePosition}
'''
# Generic template plus clash-potential parameters (no electrostatics).
optGenericClash_template = optGeneric_template+'''
lambda {lambd}
gamma {gamma}
'''
# Clash template plus spherical-compression parameters.
optGenericClashWithCompression_template = optGenericClash_template+'''
initialSphereRadius {initialSphereRadius}
minimalSphereRadius {minimalSphereRadius}
compressionVelocity {compressionVelocity}
'''
# Surface template plus AFM tip/indentation parameters.
optAFM_template = optGenericWithSurface_template+'''
frictionConstantTip {frictionConstantTip}
initialTipSampleDst {initialTipSampleDst}
descentVelocity {descentVelocity}
minimalChipHeight {minimalChipHeight}
Mtip {Mtip}
Rtip {Rtip}
Kxytip {Kxytip}
Ktip {Ktip}
epsilonTip {epsilonTip}
sigmaTip {sigmaTip}
Atip {Atip}
Btip {Btip}
epsilonTipSurf {epsilonTipSurf}
sigmaTipSurf {sigmaTipSurf}
ATipSurf {ATipSurf}
BTipSurf {BTipSurf}
nStepsIndentMeasure {nStepsIndentMeasure}
outputIndentationMeasureFilePath {outputIndentationMeasureFilePath}
'''
# Electrostatics template plus umbrella-sampling parameters.
optGenericUmbrella_template = optGenericWithElec_template+'''
umbrellaK {umbrellaK}
umbrellaInit {umbrellaInit}
umbrellaEnd {umbrellaEnd}
umbrellaWindowsNumber {umbrellaWindowsNumber}
umbrellaCopies {umbrellaCopies}
nStepsUmbrellaMeasure {nStepsUmbrellaMeasure}
outputUmbrellaMeasureFilePath {outputUmbrellaMeasureFilePath}
'''
def writeOptionsGeneric(path:str, nSteps:int, nStepsInterval:int, nStepsWrite:int,
                        nStepsBackupInterval:int, outPutFilePath:str, outPutFormat:str,
                        boxX:float, boxY:float, boxZ:float, temperature:float,
                        steepestDecentScale:float, nStepsSteepestDescent:int,
                        nStepsSteepestDescentProgressInterval:int, maxObjectiveForce:float,
                        timeStep:float, frictionConstant:float, cutOffDst:float,
                        VerletListDst:float, dielectricConstant:float, debyeLength:float,
                        inputCoordFile:str, inputTopFile:str):
    """Render the generic (electrostatics-enabled) options template and write it to *path*."""
    # At this point locals() holds exactly the call arguments; str.format()
    # ignores the extra 'path' entry since the template has no such field.
    opt = optGenericWithElec_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsGenericFromDict(path, optionsDict):
    """Write a generic options file, taking every parameter from *optionsDict*.

    The dict keys must match the parameter names of writeOptionsGeneric.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "cutOffDst", "VerletListDst",
            "dielectricConstant", "debyeLength", "inputCoordFile", "inputTopFile")
    # Keyword call replaces ~40 lines of one-variable-per-line extraction.
    writeOptionsGeneric(path, **{k: optionsDict[k] for k in keys})
def writeOptionsGenericWithSurface(path:str, nSteps:int, nStepsInterval:int, nStepsWrite:int,
                                   nStepsBackupInterval:int, outPutFilePath:str, outPutFormat:str,
                                   boxX:float, boxY:float, boxZ:float, temperature:float,
                                   steepestDecentScale:float, nStepsSteepestDescent:int,
                                   nStepsSteepestDescentProgressInterval:int, maxObjectiveForce:float,
                                   timeStep:float, frictionConstant:float, cutOffDst:float,
                                   VerletListDst:float, dielectricConstant:float, debyeLength:float,
                                   inputCoordFile:str, inputTopFile:str, epsilonSurf:float,
                                   sigmaSurf:float, surfacePosition:float):
    """Render the surface-enabled options template and write it to *path*."""
    # locals() holds exactly the call arguments here; the unused 'path'
    # entry is ignored by str.format().
    opt = optGenericWithSurface_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsGenericWithSurfaceFromDict(path, optionsDict):
    """Write a surface-enabled options file, taking parameters from *optionsDict*.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "cutOffDst", "VerletListDst",
            "dielectricConstant", "debyeLength", "inputCoordFile", "inputTopFile",
            "epsilonSurf", "sigmaSurf", "surfacePosition")
    # Keyword call replaces the long one-variable-per-line extraction.
    writeOptionsGenericWithSurface(path, **{k: optionsDict[k] for k in keys})
def writeOptionsGenericClash(path:str, nSteps:int, nStepsInterval:int, nStepsWrite:int,
                             nStepsBackupInterval:int, outPutFilePath:str, outPutFormat:str,
                             boxX:float, boxY:float, boxZ:float, temperature:float,
                             steepestDecentScale:float, nStepsSteepestDescent:int,
                             nStepsSteepestDescentProgressInterval:int, maxObjectiveForce:float,
                             timeStep:float, frictionConstant:float, cutOffDst:float,
                             VerletListDst:float, inputCoordFile:str, inputTopFile:str,
                             lambd:float, gamma:float):
    """Render the clash-potential options template and write it to *path*."""
    # locals() holds exactly the call arguments; format() ignores 'path'.
    opt = optGenericClash_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsGenericClashFromDict(path, optionsDict):
    """Write a clash-potential options file, taking parameters from *optionsDict*.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "cutOffDst", "VerletListDst",
            "inputCoordFile", "inputTopFile", "lambd", "gamma")
    # Keyword call replaces the long one-variable-per-line extraction.
    writeOptionsGenericClash(path, **{k: optionsDict[k] for k in keys})
def writeOptionsGenericClashWithCompression(path:str, nSteps:int, nStepsInterval:int,
                                            nStepsWrite:int, nStepsBackupInterval:int,
                                            outPutFilePath:str, outPutFormat:str,
                                            boxX:float, boxY:float, boxZ:float,
                                            temperature:float, steepestDecentScale:float,
                                            nStepsSteepestDescent:int,
                                            nStepsSteepestDescentProgressInterval:int,
                                            maxObjectiveForce:float, timeStep:float,
                                            frictionConstant:float, cutOffDst:float,
                                            VerletListDst:float, inputCoordFile:str,
                                            inputTopFile:str, lambd:float, gamma:float,
                                            initialSphereRadius:float, minimalSphereRadius:float,
                                            compressionVelocity:float):
    """Render the clash-with-compression options template and write it to *path*."""
    # locals() holds exactly the call arguments; format() ignores 'path'.
    opt = optGenericClashWithCompression_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsGenericClashWithCompressionFromDict(path, optionsDict):
    """Write a clash-with-compression options file from *optionsDict*.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "cutOffDst", "VerletListDst",
            "inputCoordFile", "inputTopFile", "lambd", "gamma",
            "initialSphereRadius", "minimalSphereRadius", "compressionVelocity")
    # Keyword call replaces the long one-variable-per-line extraction.
    writeOptionsGenericClashWithCompression(path, **{k: optionsDict[k] for k in keys})
def writeOptionsAFM(path:str, nSteps:int, nStepsInterval:int, nStepsWrite:int,
                    nStepsBackupInterval:int, outPutFilePath:str, outPutFormat:str,
                    boxX:float, boxY:float, boxZ:float, temperature:float,
                    steepestDecentScale:float, nStepsSteepestDescent:int,
                    nStepsSteepestDescentProgressInterval:int, maxObjectiveForce:float,
                    timeStep:float, frictionConstant:float, frictionConstantTip:float,
                    cutOffDst:float, VerletListDst:float, dielectricConstant:float,
                    debyeLength:float, inputCoordFile:str, inputTopFile:str,
                    epsilonSurf:float, sigmaSurf:float, surfacePosition:float,
                    initialTipSampleDst:float, descentVelocity:float, minimalChipHeight:float,
                    Mtip:float, Rtip:float, Kxytip:float, Ktip:float,
                    epsilonTip:float, sigmaTip:float, Atip:float, Btip:float,
                    epsilonTipSurf:float, sigmaTipSurf :float, ATipSurf:float, BTipSurf:float,
                    nStepsIndentMeasure:str, outputIndentationMeasureFilePath:str):
    """Render the AFM (tip/indentation) options template and write it to *path*."""
    # locals() holds exactly the call arguments; format() ignores 'path'.
    opt = optAFM_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsAFMFromDict(path, optionsDict):
    """Write an AFM options file, taking every parameter from *optionsDict*.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "frictionConstantTip",
            "cutOffDst", "VerletListDst", "dielectricConstant", "debyeLength",
            "inputCoordFile", "inputTopFile", "epsilonSurf", "sigmaSurf",
            "surfacePosition", "initialTipSampleDst", "descentVelocity",
            "minimalChipHeight", "Mtip", "Rtip", "Kxytip", "Ktip",
            "epsilonTip", "sigmaTip", "Atip", "Btip", "epsilonTipSurf",
            "sigmaTipSurf", "ATipSurf", "BTipSurf", "nStepsIndentMeasure",
            "outputIndentationMeasureFilePath")
    # Keyword call replaces ~80 lines of one-variable-per-line extraction.
    writeOptionsAFM(path, **{k: optionsDict[k] for k in keys})
def writeOptionsGenericUmbrella(path:str, nSteps:int, nStepsInterval:int, nStepsWrite:int,
                                nStepsBackupInterval:int, outPutFilePath:str, outPutFormat:str,
                                boxX:float, boxY:float, boxZ:float, temperature:float,
                                steepestDecentScale:float, nStepsSteepestDescent:int,
                                nStepsSteepestDescentProgressInterval:int, maxObjectiveForce:float,
                                timeStep:float, frictionConstant:float, cutOffDst:float,
                                VerletListDst:float, dielectricConstant:float, debyeLength:float,
                                inputCoordFile:str, inputTopFile:str, umbrellaK:float,
                                umbrellaInit:float, umbrellaEnd:float, umbrellaWindowsNumber:int,
                                umbrellaCopies:int, nStepsUmbrellaMeasure:int,
                                outputUmbrellaMeasureFilePath:str):
    """Render the umbrella-sampling options template and write it to *path*."""
    # locals() holds exactly the call arguments; format() ignores 'path'.
    opt = optGenericUmbrella_template.format(**locals())
    with open(path, "w") as f:
        f.write(opt)
def writeOptionsGenericUmbrellaFromDict(path, optionsDict):
    """Write an umbrella-sampling options file from *optionsDict*.

    Raises:
        KeyError: if a required option is missing from optionsDict.
    """
    keys = ("nSteps", "nStepsInterval", "nStepsWrite", "nStepsBackupInterval",
            "outPutFilePath", "outPutFormat", "boxX", "boxY", "boxZ",
            "temperature", "steepestDecentScale", "nStepsSteepestDescent",
            "nStepsSteepestDescentProgressInterval", "maxObjectiveForce",
            "timeStep", "frictionConstant", "cutOffDst", "VerletListDst",
            "dielectricConstant", "debyeLength", "inputCoordFile", "inputTopFile",
            "umbrellaK", "umbrellaInit", "umbrellaEnd", "umbrellaWindowsNumber",
            "umbrellaCopies", "nStepsUmbrellaMeasure", "outputUmbrellaMeasureFilePath")
    # Keyword call replaces the long one-variable-per-line extraction.
    writeOptionsGenericUmbrella(path, **{k: optionsDict[k] for k in keys})
################################################
#writeOptionsGeneric("optionsTestGeneric.dat",
# 1000,
# 1000,
# 1000,
# 1000,
# "kk",
# "asdf",
# 1.23,2.13,3.12,
# 1.0,
# 1.5,
# 55555,
# 4444,
# -1.0,
# 0.123456,
# 2.0,
# 100.0,
# 150.0,
# "asdf.coord",
# "asdf.top")
#
#writeOptionsGenericWithSurface("optionsTestGenericSurface.dat",
# 1000,
# 1000,
# 1000,
# 1000,
# "kk",
# "asdf",
# 1.23,2.13,3.12,
# 1.0,
# 1.5,
# 55555,
# 4444,
# -1.0,
# 0.123456,
# 2.0,
# 100.0,
# 150.0,
# "asdf.coord",
# "asdf.top",
# 1.0,
# 2.0,
# -300)
#
#writeOptionsGenericClash("optionsTestClash.dat",
# 1000,
# 1000,
# 1000,
# 1000,
# "kk",
# "asdf",
# 1.23,2.13,3.12,
# 1.0,
# 1.5,
# 55555,
# 4444,
# -1.0,
# 0.123456,
# 2.0,
# 100.0,
# 150.0,
# "asdf.coord",
# "asdf.top",
# 2.0,
# 4.0)
#
#writeOptionsGenericClashWithCompression("optionsTestClashWithCompression.dat",
# 1000,
# 1000,
# 1000,
# 1000,
# "kk",
# "asdf",
# 1.23,2.13,3.12,
# 1.0,
# 1.5,
# 55555,
# 4444,
# -1.0,
# 0.123456,
# 2.0,
# 100.0,
# 150.0,
# "asdf.coord",
# "asdf.top",
# 2.0,
# 4.0,
# 300,
# 400,
# 0.01001)
#
#writeOptionsAFM("optionsAFM.dat",
# 1000,
# 1000,
# 1000,
# 1000,
# "kk",
# "asdf",
# 1.23,2.13,3.12,
# 1.0,
# 1.5,
# 55555,
# 4444,
# -1.0,
# 0.123456,
# 2.0,
# 100.0,
# 150.0,
# "asdf.coord",
# "asdf.top",
# 1.0,
# 2.0,
# -300,
# 1.123,
# 0.001,
# 2.323,
# 1.02,
# 5.02,
# 5698,
# 456,
# 5897,
# 57,
# 142566,
# 146,
# 1486,
# 98765,
# 12453,
# 1289)
| 45.661622
| 133
| 0.453796
| 1,817
| 42,237
| 10.53825
| 0.07705
| 0.03076
| 0.006267
| 0.023762
| 0.815699
| 0.815699
| 0.795958
| 0.777366
| 0.777366
| 0.769689
| 0
| 0.017476
| 0.489239
| 42,237
| 924
| 134
| 45.711039
| 0.870115
| 0.096645
| 0
| 0.708564
| 0
| 0
| 0.104034
| 0.02512
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016575
| false
| 0
| 0
| 0
| 0.016575
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
26a3155651daa352ce8fc56508c8c57ec5b1ad78
| 119
|
py
|
Python
|
17.modules/3.dir.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
17.modules/3.dir.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
17.modules/3.dir.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
import platform
import lib

# Demo: show the names defined by the local `lib` module and by the
# stdlib `platform` module.  (Trailing semicolons removed — they are
# redundant in Python.)
print("dir(lib) : ")
print(dir(lib))
print("\n\ndir(platform) : ")
print(dir(platform))
| 17
| 30
| 0.663866
| 17
| 119
| 4.647059
| 0.411765
| 0.303797
| 0.278481
| 0.35443
| 0.379747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10084
| 119
| 7
| 31
| 17
| 0.738318
| 0
| 0
| 0
| 0
| 0
| 0.258333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
26e237456b93168931882172c814155a6afa7fe6
| 143,360
|
py
|
Python
|
back/perturbtrafic_api/scripts/evenements_xml.py
|
sitn/perturbtrafic
|
405fdaf98a1e3538d5e26fd79c97f3a88b2d8231
|
[
"BSD-3-Clause"
] | null | null | null |
back/perturbtrafic_api/scripts/evenements_xml.py
|
sitn/perturbtrafic
|
405fdaf98a1e3538d5e26fd79c97f3a88b2d8231
|
[
"BSD-3-Clause"
] | 5
|
2019-12-26T17:37:51.000Z
|
2021-12-13T20:33:39.000Z
|
back/perturbtrafic_api/scripts/evenements_xml.py
|
sitn/perturbtrafic
|
405fdaf98a1e3538d5e26fd79c97f3a88b2d8231
|
[
"BSD-3-Clause"
] | 1
|
2020-01-09T09:57:59.000Z
|
2020-01-09T09:57:59.000Z
|
import json
import os
import datetime
import logging
import xmltodict
from sqlalchemy import exc
import transaction
from .. import models
from ..scripts.utils import Utils
log = logging.getLogger(__name__)
class EvenementXML():
folder_path = None
settings = None
request = None
@classmethod
def list_folder_files(cls, request):
try:
cls.request = request
cls.settings = cls.request.registry.settings
cls.folder_path = cls.settings['evenements_xml_files_folder']
return [f for f in os.listdir(cls.folder_path) if f.endswith('.xml')]
except Exception as error:
raise Exception(str(error))
return []
@classmethod
def remove_file(cls, request, file_name):
try:
os.remove(request.registry.settings['evenements_xml_files_folder'] + '/' + file_name)
except Exception as error:
raise Exception(str(error))
return []
@classmethod
def move_file_to_success_folder(cls, request, file_name):
try:
os.rename(request.registry.settings['evenements_xml_files_folder'] + '/' + file_name,
request.registry.settings['evenements_xml_files_success_folder'] + '/' + file_name)
except Exception as error:
raise Exception(str(error))
return []
@classmethod
def move_file_to_failure_folder(cls, request, file_name):
try:
os.rename(request.registry.settings['evenements_xml_files_folder'] + '/' + file_name,
request.registry.settings['evenements_xml_files_failure_folder'] + '/' + file_name)
except Exception as error:
raise Exception(str(error))
return []
@classmethod
def xml_to_json(cls, filename):
try:
with open(cls.folder_path + '/' + filename, mode="r", encoding="utf-8") as file:
data = file.read().replace('\n', '')
xpars = xmltodict.parse(data)
return xpars
except Exception as e:
log.error(str(e), exc_info=True)
log.debug("Debug_GL: xml_to_json: Fichier XML ({}) incomplet -> return None".format(filename))
return None
@classmethod
def add_file_data(cls, file_json):
try:
if 'dossiers' in file_json:
dossiers = file_json['dossiers']
if dossiers and 'dossier' in dossiers:
dossier = dossiers['dossier']
if dossier:
id_dossier = dossier['@id']
# (1) Autre entrave
if id_dossier and int(id_dossier) == 3:
return cls.add_file_data_autre_entrave(dossier)
# (2) Fouille
elif id_dossier and int(id_dossier) == 1:
return cls.add_file_data_fouille(dossier)
except exc.ResourceClosedError as e:
log.error(str(e), exc_info=True)
log.info('Debug_GL: add_file_data: Error using file_json: {}'.format(file_json))
return False
return True
@classmethod
def add_file_data_autre_entrave(cls, dossier):
"""Import an 'autre entrave' (other obstruction) dossier parsed from XML.

Walks the CAMAC form/section/question tree of *dossier*, collects the
answers into local variables, then inside a nested transaction creates the
Evenement, AutreEvenement, geometry and Perturbation (Occupation/Fermeture)
records.

:param dossier: dict for one <dossier> node (as produced by xmltodict).
:return: True on success; False when a required value (date_debut,
    heure_debut, the event question list) is missing or when any
    exception forces a rollback.
"""
try:
max_event_id = None
cls.request.dbsession.begin_nested()
log.debug("Debug_GL: add_file_data_autre_entrave: begin_nested() effectué")
with transaction.manager:
cls.request.dbsession.execute('set search_path to ' + cls.settings['schema_name'])
if dossier:
# label (libelle)
title = dossier['title'] if 'title' in dossier else None
# description
description = dossier['descriptions']['descr_lib'] if 'descriptions' in dossier and \
'descr_lib' in dossier['descriptions'] else None
# CAMAC reference
ref_camac = dossier['instance_id'] if 'instance_id' in dossier else None
# # Event section (entrave): id 51
# Start date: id 121
date_debut = None
# Start time: id 122
heure_debut = None
# End date: id 123
date_fin = None
# End time: id 124
heure_fin = None
# Surface: id 15
surface = None
# Stage length: id 16
longueur_etape = None
# Obstruction cause: id 134
cause_entrave = None
# Obstruction description: id 135
description_entrave = None
# Excavation address: id 201
adresse_fouille = None
# # Applicant company section: id 1
# Applicant company street: id 2
rue_requerant_entreprise = None
# Applicant company locality: id 3
localite_requerant_entreprise = None
# Applicant company phone: id 4
telephone_requerant_entreprise = None
# Applicant company fax: id 5
fax_requerant_entreprise = None
# Applicant company email: id 6
courriel_requerant_entreprise = None
# Applicant company name: id 1
nom_requerant_entreprise = None
# # Applicant contact-person section: id 32
# Contact person last name: id 100
nom_requerant_personne = None
# Contact person first name: id 101
prenom_requerant_personne = None
# Contact person mobile: id 102
mobile_requerant_personne = None
# Contact person phone: id 103
telephone_requerant_personne = None
# Contact person fax: id 104
fax_requerant_personne = None
# Contact person email: id 105
courriel_requerant_personne = None
# # Project owner local-management section: id 33
# Local management last name: id 106
nom_maitre_ouvrage_dir_loc = None
# Local management first name: id 107
prenom_maitre_ouvrage_dir_loc = None
# Local management mobile: id 108
mobile_maitre_ouvrage_dir_loc = None
# Local management phone: id 109
telephone_maitre_ouvrage_dir_loc = None
# Local management fax: id 110
fax_maitre_ouvrage_dir_loc = None
# Local management email: id 111
courriel_maitre_ouvrage_dir_loc = None
# # Project owner company section: id 2
# Project owner company street: id 2
rue_maitre_ouvrage_entreprise = None
# Project owner company locality: id 3
localite_maitre_ouvrage_entreprise = None
# Project owner company phone: id 4
telephone_maitre_ouvrage_entreprise = None
# Project owner company fax: id 5
fax_maitre_ouvrage_entreprise = None
# Project owner company email: id 6
courriel_maitre_ouvrage_entreprise = None
# Project owner company name: id 1
nom_maitre_ouvrage_entreprise = None
# # Billing section: id 4
# Billing address: id 10
adresse_facturation = None
# # Location-and-period section: id 52
# Land parcel number: id 97
num_bien_fonds = None
# Municipality: id 12
commune = None
# X coordinate: id 94
coordonnee_x = None
# Y coordinate: id 95
coordonnee_y = None
# Geometry collection: id 213 & 222
geometry_collection = None
# Cadastre: id 96
cadastre = None
# Locality name (lieu-dit): id 99
lieu_dit = None
# Service to apply: id 242
service_a_appliquer = None
form = dossier['forms']['form'] if 'forms' in dossier and 'form' in dossier['forms'] else None
if form and len(form) > 0:
"""------------- (1) Form evenement ---------------"""
evenement_form = [f for f in form if f['@id'] == '45']
evenement_form = evenement_form[0] if len(evenement_form) > 0 else None
if evenement_form:
section = evenement_form['sections']['section'] if 'sections' in evenement_form and \
'section' in evenement_form['sections'] else None
if section and len(section) > 0:
"""( 1.1) Info evenement """
# date_debut, heure_debut, date_fin, heure_fin, surface ....
section_evenement = [sec for sec in section if sec['@id'] == '51']
section_evenement = section_evenement[0] if len(section_evenement) > 0 else None
evenement_question = section_evenement['questions']['question'] if section_evenement and \
'questions' in section_evenement and 'question' in section_evenement['questions'] else None
if not evenement_question:
return False
for one_evenement_question in evenement_question:
# NOTE(review): 'id' shadows the builtin; here and in every loop below.
id = one_evenement_question['@id']
# surface
if id == '15':
surface = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
try:
surface = float(surface)
except Exception:
surface = None
# longueur_etape
elif id == '16':
longueur_etape = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# date_debut
elif id == '121':
date_debut = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# Normalize '31.12.2020' -> '31-12-2020' for strptime below.
date_debut = date_debut.replace(".", "-") if date_debut else None
if date_debut is None:
log.error('date_debut is null', exc_info=True)
return False
# heure_debut
elif id == '122':
heure_debut = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
if heure_debut is None:
log.error('heure_debut is null')
return False
# Normalize '8h30' / '8.30' -> '8:30', then pad to 'HH:MM'.
heure_debut = heure_debut.lower().replace(".", ":").replace("h", ":")
heure_debut = heure_debut + ":00" if len(heure_debut) <= 2 else heure_debut
heure_debut = "0" + heure_debut if len(heure_debut) == 3 else heure_debut
# date_fin
elif id == '123':
date_fin = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
date_fin = date_fin.replace(".", "-") if date_fin else None
# heure_fin
elif id == '124':
heure_fin = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
if heure_fin:
heure_fin = heure_fin.lower().replace(".", ":").replace("h", ":")
heure_fin = heure_fin + ":00" if len(heure_fin) <= 2 else heure_fin
heure_fin = "0" + heure_fin if len(heure_fin) == 3 else heure_fin
# cause_entrave
elif id == '134':
cause_entrave = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# description_entrave
elif id == '135':
description_entrave = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# adresse_fouille
elif id == '201':
adresse_fouille = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
"""( 1.2) Reque©rant - Entreprise / Service / Commune """
section_requerant_entreprise = [sec for sec in section if sec['@id'] == '1']
section_requerant_entreprise = section_requerant_entreprise[0]\
if len(section_requerant_entreprise) > 0 else None
requerant_entreprise_question = section_requerant_entreprise['questions']['question'] \
if section_requerant_entreprise and 'questions' in section_requerant_entreprise and \
'question' in section_requerant_entreprise['questions'] else None
# NOTE(review): when the section is absent this is None and the for below
# raises TypeError, which the broad except at the end turns into a rollback
# and a False return — same for the other question loops in this method.
for one_requerant_entreprise_question in requerant_entreprise_question:
id = one_requerant_entreprise_question['@id']
# nom_requerant_entreprise
if id == '1':
nom_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# rue_requerant_entreprise
elif id == '2':
rue_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# localite_requerant_entreprise
elif id == '3':
localite_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# telephone_requerant_entreprise
elif id == '4':
telephone_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# fax_requerant_entreprise
elif id == '5':
fax_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# courriel_requerant_entreprise
elif id == '6':
courriel_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
"""( 1.3) Reque©rant - Personne de contact """
section_requerant_personne = [sec for sec in section if sec['@id'] == '32']
section_requerant_personne = section_requerant_personne[0]\
if len(section_requerant_personne) > 0 else None
requerant_personne_question = section_requerant_personne['questions']['question'] \
if section_requerant_personne and 'questions' in section_requerant_personne and \
'question' in section_requerant_personne['questions'] else None
for one_requerant_personne_question in requerant_personne_question:
id = one_requerant_personne_question['@id']
# nom_requerant_personne
if id == '100':
nom_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# prenom_requerant_personne
elif id == '101':
prenom_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# mobile_requerant_personne
elif id == '102':
mobile_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# telephone_requerant_personne
elif id == '103':
telephone_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# fax_requerant_personne
elif id == '104':
fax_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# courriel_requerant_personne
elif id == '105':
courriel_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
"""( 1.4) Maitre de l'ouvrage ou mandataire - Direction locale """
section_maitre_ouvrage_dir_loc = [sec for sec in section if sec['@id'] == '33']
section_maitre_ouvrage_dir_loc = section_maitre_ouvrage_dir_loc[0]\
if len(section_maitre_ouvrage_dir_loc) > 0 else None
maitre_ouvrage_question_dir_loc = \
section_maitre_ouvrage_dir_loc['questions']['question'] \
if section_maitre_ouvrage_dir_loc and 'questions' in section_maitre_ouvrage_dir_loc and \
'question' in section_maitre_ouvrage_dir_loc['questions'] else None
for one_maitre_ouvrage_question_dir_loc in maitre_ouvrage_question_dir_loc:
id = one_maitre_ouvrage_question_dir_loc['@id']
# nom_maitre_ouvrage_dir_loc
if id == '106':
nom_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# prenom_maitre_ouvrage_dir_loc
elif id == '107':
prenom_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# mobile_maitre_ouvrage_dir_loc
elif id == '108':
mobile_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# telephone_maitre_ouvrage_dir_loc
elif id == '109':
telephone_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# fax_maitre_ouvrage_dir_loc
elif id == '110':
fax_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# courriel_maitre_ouvrage_dir_loc
elif id == '111':
courriel_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
"""( 1.5) Maitre de l'ouvrage ou mandataire - Entreprise / Bureau d'ingénieur """
section_maitre_ouvrage_entreprise = [sec for sec in section if sec['@id'] == '2']
section_maitre_ouvrage_entreprise = section_maitre_ouvrage_entreprise[0]\
if len(section_maitre_ouvrage_entreprise) > 0 else None
maitre_ouvrage_entreprise_question = \
section_maitre_ouvrage_entreprise['questions']['question'] \
if section_maitre_ouvrage_entreprise and 'questions' in section_requerant_entreprise and \
'question' in section_maitre_ouvrage_entreprise['questions'] else None
# NOTE(review): the condition above tests 'questions' in
# section_requerant_entreprise — every other term here uses
# section_maitre_ouvrage_entreprise; looks like a copy-paste bug. Confirm.
for one_maitre_ouvrage_entreprise_question in maitre_ouvrage_entreprise_question:
id = one_maitre_ouvrage_entreprise_question['@id']
# nom_maitre_ouvrage_entreprise
if id == '1':
nom_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
# rue_maitre_ouvrage_entreprise
elif id == '2':
rue_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
# localite_maitre_ouvrage_entreprise
elif id == '3':
localite_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
# telephone_maitre_ouvrage_entreprise
elif id == '4':
telephone_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
# fax_maitre_ouvrage_entreprise
elif id == '5':
fax_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
# courriel_maitre_ouvrage_entreprise
elif id == '6':
courriel_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value'] if \
'answers' in one_maitre_ouvrage_entreprise_question and \
'answer' in one_maitre_ouvrage_entreprise_question['answers'] and \
'value' in one_maitre_ouvrage_entreprise_question['answers']['answer'] else None
"""( 1.6) Facturation """
section_facturation = [sec for sec in section if sec['@id'] == '4']
section_facturation = section_facturation[0] if len(section_facturation) > 0 else None
facturation_question = \
section_facturation['questions']['question'] if section_facturation \
and 'questions' in section_facturation \
and 'question' in section_facturation['questions'] else None
# NOTE(review): facturation_question is computed but never used; the code
# below reads 'answers' off the *section* dict after re-checking its own id.
# adresse_facturation therefore stays None unless sections carry answers
# directly — confirm against the real XML.
if section_facturation:
id = section_facturation['@id']
# adresse_facturation
if id == '4':
adresse_facturation = section_facturation['answers']['answer']['value'] \
if 'answers' in section_facturation and \
'answer' in section_facturation['answers'] and \
'value' in section_facturation['answers']['answer'] else None
"""( 1.7) Localisation et période """
section_loc_periode = [sec for sec in section if sec['@id'] == '52']
section_loc_periode = section_loc_periode[0] if len(section_loc_periode) > 0 else None
loc_periode_question = \
section_loc_periode['questions']['question'] if section_loc_periode and \
'questions' in section_loc_periode and \
'question' in section_loc_periode['questions'] else None
for one_loc_periode_question in loc_periode_question:
id = one_loc_periode_question['@id']
# commune
if id == '12':
commune = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# coordonnee_x
elif id == '94':
coordonnee_x = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# coordonnee_y
elif id == '95':
coordonnee_y = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# cadastre
elif id == '96':
cadastre = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# num_bien_fonds
elif id == '97':
num_bien_fonds = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# lieu_dit
elif id == '99':
lieu_dit = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# geometry_collection 213
elif id == '213':
geometry_collection = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# geometry_collection 222
elif id == '222':
geometry_collection = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# service_a_appliquer
elif id == '242':
service_a_appliquer = \
one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# Evenement model
evenement_model = models.Evenement(
id_entite=cls.settings['id_entite_xml_import'],
id_responsable=cls.settings['id_responsable_xml_import'],
type=int(cls.settings['autre_evenement_id']),
libelle=title,
description=description,
ref_camac=ref_camac,
date_debut=datetime.datetime.strptime(date_debut, cls.settings[
'xml_date_template']) if date_debut else None,
heure_debut=heure_debut,
date_fin=datetime.datetime.strptime(date_fin, cls.settings[
'xml_date_template']) if date_fin else None,
heure_fin=heure_fin,
nom_requerant=nom_requerant_entreprise,
rue_requerant=rue_requerant_entreprise,
localite_requerant=localite_requerant_entreprise,
telephone_requerant=telephone_requerant_entreprise,
fax_requerant=fax_requerant_entreprise,
courriel_requerant=courriel_requerant_entreprise,
nom_contact=nom_requerant_personne,
prenom_contact=prenom_requerant_personne,
mobile_contact=mobile_requerant_personne,
telephone_contact=telephone_requerant_personne,
fax_contact=fax_requerant_personne,
courriel_contact=courriel_requerant_personne,
id_utilisateur_ajout=cls.settings['id_user_ajout_xml_import'],
id_utilisateur_modification=cls.settings['id_user_ajout_xml_import'],
numero_dossier=Utils.generate_numero_dossier(cls.request,
int(cls.settings['autre_evenement_id'])),
localisation=commune
)
if evenement_model:
cls.request.dbsession.add(evenement_model)
# flush so the generated primary key is available below
cls.request.dbsession.flush()
max_event_id = evenement_model.id
# AutreEvenement model
autre_ev_model = models.AutreEvenement(
id_evenement=max_event_id,
cause=cause_entrave,
nom_maitre_ouvrage=nom_maitre_ouvrage_entreprise,
rue_maitre_ouvrage=rue_maitre_ouvrage_entreprise,
localite_maitre_ouvrage=localite_maitre_ouvrage_entreprise,
telephone_maitre_ouvrage=telephone_maitre_ouvrage_entreprise,
fax_maitre_ouvrage=fax_maitre_ouvrage_entreprise,
courriel_maitre_ouvrage=courriel_maitre_ouvrage_entreprise,
nom_direction_locale=nom_maitre_ouvrage_dir_loc,
prenom_direction_locale=prenom_maitre_ouvrage_dir_loc,
mobile_direction_locale=mobile_maitre_ouvrage_dir_loc,
telephone_direction_locale=telephone_maitre_ouvrage_dir_loc,
fax_direction_locale=fax_maitre_ouvrage_dir_loc,
courriel_direction_locale=courriel_maitre_ouvrage_dir_loc,
)
cls.request.dbsession.add(autre_ev_model)
# Evenement point model
# NOTE(review): when geometry_collection is None a point geometry is built
# from coordonnee_x/y (string concat raises TypeError if they are None),
# the model is created but never added (the add call is commented out),
# and add_ev_geometries is still invoked with geometry_collection=None.
# Confirm this is the intended fallback.
if geometry_collection is None:
geometry = json.loads('{"type":"Point","coordinates":['
+ coordonnee_x + ',' + coordonnee_y + ']}')
evenement_point_model = models.EvenementPoint(id_evenement=max_event_id)
evenement_point_model.set_json_geometry(str(geometry), cls.settings['srid'])
pass
# cls.request.dbsession.add(evenement_point_model)
Utils.add_ev_geometries(cls.request, geometry_collection, max_event_id)
"""------------- (2) Form perturbation ---------------"""
perturbations_form = [f for f in form if f['@id'] == '46']
perturbations_form = perturbations_form[0] if len(perturbations_form) > 0 else None
if perturbations_form:
# Occupations perturbations
perturbations_values = {}
section = perturbations_form['sections']['section'] \
if perturbations_form and 'sections' in perturbations_form and \
'section' in perturbations_form['sections'] else None
if section and len(section) > 0:
"""( 2.1) Type perturbation """
section_type_perturbation = [sec for sec in section if sec['@id'] == '42']
section_type_perturbation = section_type_perturbation[0]\
if len(section_type_perturbation) > 0 else None
type_perturbation_question = \
section_type_perturbation['questions']['question'] \
if section_type_perturbation and 'questions' in section_type_perturbation and \
'question' in section_type_perturbation['questions'] else None
if section_type_perturbation:
id = section_type_perturbation['@id']
# type_pertubation
if id == '42':
# '@item' is read without a guard; a missing key raises KeyError,
# handled by the broad except below (rollback + False).
item_id = type_perturbation_question['answers']['answer']['@item']
perturbations_values[item_id] = {}
type_pertubation = \
type_perturbation_question['answers']['answer']['value'] \
if 'answers' in type_perturbation_question and \
'answer' in type_perturbation_question['answers'] and \
'value' in type_perturbation_question['answers']['answer'] else None
if type_pertubation == "Occupation":
type_pertubation = int(
cls.settings['occupation_perturbation_id'])
elif type_pertubation == "Fermeture":
type_pertubation = int(
cls.settings['fermeture_perturbation_id'])
perturbations_values[item_id]['type_pertubation'] = type_pertubation
# Other perturbations types
section_type_other_perturbation = [sec for sec in section if sec['@id'] == '45']
section_type_other_perturbation = section_type_other_perturbation[0] \
if section_type_other_perturbation and \
len(section_type_other_perturbation) > 0 else section_type_other_perturbation
if section_type_other_perturbation:
type_other_perturbation_question = \
section_type_other_perturbation['questions']['question'] \
if section_type_other_perturbation \
and 'questions' in section_type_other_perturbation \
and 'question' in section_type_other_perturbation['questions'] else None
other_perturb_types_answers = cls.get_answers(type_other_perturbation_question)
for one_item_id in other_perturb_types_answers:
one_type_pertubation = other_perturb_types_answers[one_item_id]
if one_item_id and one_type_pertubation:
# item ids of additional perturbations are shifted by one
one_item_id = int(one_item_id)
perturbations_values[str(one_item_id + 1)] = {}
if one_type_pertubation == "Occupation":
perturbations_values[str(one_item_id + 1)]['type_pertubation'] = int(
cls.settings['occupation_perturbation_id'])
elif one_type_pertubation == "Fermeture":
perturbations_values[str(one_item_id + 1)]['type_pertubation'] = int(
cls.settings['fermeture_perturbation_id'])
"""(2.2) Occupation"""
section_occupation = [sec for sec in section if sec['@id'] == '41']
section_occupation = section_occupation[0] if len(section_occupation) > 0 else None
# Set occupation values
if section_occupation:
cls.set_ocuppations_values(section_occupation, perturbations_values)
"""(2.2) Fermeture"""
section_fermeture = [sec for sec in section if sec['@id'] == '43']
section_fermeture = section_fermeture[0] if len(section_fermeture) > 0 else None
# Set fermetures values
if section_fermeture:
cls.set_fermetures_values(section_fermeture, perturbations_values)
for one_perturb_item_id in perturbations_values:
one_perturb_item = perturbations_values[one_perturb_item_id]
if 'type_pertubation' in one_perturb_item:
type_pertubation = one_perturb_item['type_pertubation']
# Check date_debut, if less than 24h, urgence=true
urgence = False
date_debut = one_perturb_item['date_debut']\
if 'date_debut' in one_perturb_item else None
heure_debut = one_perturb_item['heure_debut']\
if 'heure_debut' in one_perturb_item else None
date_fin = one_perturb_item['date_fin'] if 'date_fin' in one_perturb_item else None
heure_fin = one_perturb_item['heure_fin'] if 'heure_fin' in one_perturb_item else None
if date_debut is not None and heure_debut is not None:
date_time_str = str(date_debut) + ' ' + str(heure_debut)
date_time_obj = datetime.datetime.strptime(date_time_str, cls.settings[
'xml_date_template'] + ' ' + cls.settings['xml_heure_template'])
now = datetime.datetime.now()
# start lies within the next 24 hours -> flag as urgent
if date_time_obj >= now and date_time_obj <= now + datetime.timedelta(days=1):
urgence = True
perturbation_model = models.Perturbation(
id_evenement=max_event_id,
type=type_pertubation,
description=description_entrave,
date_debut=datetime.datetime.strptime(date_debut, cls.settings[
'xml_date_template']) if date_debut else None,
heure_debut=heure_debut,
date_fin=datetime.datetime.strptime(date_fin, cls.settings[
'xml_date_template']) if date_fin else None,
heure_fin=heure_fin,
nom_responsable_trafic=one_perturb_item['nom']
if 'nom' in one_perturb_item else None,
prenom_responsable_trafic=one_perturb_item['prenom']
if 'prenom' in one_perturb_item else None,
mobile_responsable_trafic=one_perturb_item['mobile']
if 'mobile' in one_perturb_item else None,
telephone_responsable_trafic=one_perturb_item['telephone']
if 'telephone' in one_perturb_item else None,
# fax_responsable_trafic=fax,
courriel_responsable_trafic=one_perturb_item['courriel']
if 'courriel' in one_perturb_item else None,
remarque=one_perturb_item['remarque'] if 'remarque' in one_perturb_item else None,
id_utilisateur_ajout=cls.settings['id_user_ajout_xml_import'],
id_utilisateur_modification=cls.settings['id_user_ajout_xml_import'],
urgence=urgence,
etat=cls.settings['perturbation_etat_acceptee_code']
if urgence is True else cls.settings['perturbation_etat_attente_code']
)
if perturbation_model:
cls.request.dbsession.add(perturbation_model)
# flush so the generated primary key is available below
cls.request.dbsession.flush()
max_perturbation_id = perturbation_model.id
# Occupation
# NOTE(review): this branch is guarded by section_fermeture, not
# section_occupation — probably a copy-paste bug; confirm.
if type_pertubation == int(cls.settings['occupation_perturbation_id']) \
and section_fermeture:
occupation_model = models.Occupation(
id_perturbation=max_perturbation_id,
id_responsable_regulation=cls.settings['id_responsable_xml_import'],
type_occupation=one_perturb_item['type_occupation']
if 'type_occupation' in one_perturb_item else None,
type_regulation=one_perturb_item['type_regulation']
if 'type_regulation' in one_perturb_item else None,
voies_condamnees=one_perturb_item['voies_condamnees']
if 'voies_condamnees' in one_perturb_item else None,
largeur_gabarit=one_perturb_item['largeur_gabarit']
if 'largeur_gabarit' in one_perturb_item else None,
hauteur_gabarit=one_perturb_item['hauteur_gabarit']
if 'hauteur_gabarit' in one_perturb_item else None,
heure_pointe=one_perturb_item['occupation_heures_pointes']
if 'occupation_heures_pointes' in one_perturb_item else None,
# NOTE(review): the guard below tests '' instead of
# 'occupation_weekend', so week_end is always None unless an
# empty-string key exists — likely a typo; confirm.
week_end=one_perturb_item['occupation_weekend']
if '' in one_perturb_item else None)
cls.request.dbsession.add(occupation_model)
# Fermeture
elif type_pertubation == int(cls.settings['fermeture_perturbation_id']) \
and section_fermeture:
fermeture_model = models.Fermeture(
id_perturbation=max_perturbation_id, deviation=one_perturb_item[
'deviation']
if 'deviation' in one_perturb_item else None,
id_responsable=cls.settings['id_responsable_xml_import'])
cls.request.dbsession.add(fermeture_model)
# Geometries
Utils.add_perturb_geometries(cls.request, geometry_collection, max_perturbation_id)
# Commit transaction  # pointless since handled by the transaction manager?
log.debug("Debug_GL: add_file_data_autre_entrave: Tentative de flush final pour ce fichier")
cls.request.dbsession.flush()
log.debug("Debug_GL: add_file_data_autre_entrave: Flush effectué")
except Exception as e:
# Any failure above (missing keys, None iterations, DB errors) lands here:
# roll back the nested transaction and report failure to the caller.
log.error(str(e))
log.debug("Debug_GL: add_file_data_autre_entrave: Exception -> Tentative de rollback")
cls.request.dbsession.rollback()
log.debug("Debug_GL: add_file_data_autre_entrave: Exception -> Rollback effectué")
# raise e
log.debug("Debug_GL: add_file_data_autre_entrave: Exception -> return False")
return False
return True
@classmethod
def add_file_data_fouille(cls, dossier):
try:
max_event_id = None
cls.request.dbsession.begin_nested()
log.debug("Debug_GL: add_file_data_fouille: begin_nested() effectué")
with transaction.manager:
cls.request.dbsession.execute('set search_path to ' + cls.settings['schema_name'])
if dossier:
# libelle
title = dossier['title'] if 'title' in dossier else None
# description
description = dossier['descriptions']['descr_lib'] \
if 'descriptions' in dossier and 'descr_lib' in dossier['descriptions'] else None
# Reférence CAMAC
ref_camac = dossier['instance_id'] if 'instance_id' in dossier else None
# # Section_evenement fouille : id 44
# Date debut : id 121
date_debut = None
# Heure debut : id 122
heure_debut = None
# Date fin : id 123
date_fin = None
# Heure fin : id 124
heure_fin = None
# Cause_fouille : id 134
cause_entrave = None
# Description_fouille : id 135
description_entrave = None
# Adresse de la fouille : id 201
adresse_fouille = None
# Section_requerant_entreprise fouille : id 1
# Rue requerant entreprise : id 2
rue_requerant_entreprise = None
# Localite requerant entreprise : id 3
localite_requerant_entreprise = None
# Telephone requerant entreprise : id 4
telephone_requerant_entreprise = None
# Fax requerant entreprise : id 5
fax_requerant_entreprise = None
# Courriel requerant entreprise : id 6
courriel_requerant_entreprise = None
# Nom requerant entreprise : id 1
nom_requerant_entreprise = None
# # Section_requerant_personne fouille : id 32
# Nom requerant personne : id 100
nom_requerant_personne = None
# Prenom requerant personne : id 101
prenom_requerant_personne = None
# Mobile requerant personne : id 102
mobile_requerant_personne = None
# Telephone requerant personne : id 103
telephone_requerant_personne = None
# Fax requerant personne : id 104
fax_requerant_personne = None
# Courriel requerant personne : id 105
courriel_requerant_personne = None
# section_maitre_ouvrage_entreprise fouille : id 2
# Rue maitre ouvrage entreprise : id 2
rue_maitre_ouvrage_entreprise = None
# Localite maitre ouvrage entreprise : id 3
localite_maitre_ouvrage_entreprise = None
# Telephone maitre ouvrage entreprise : id 4
telephone_maitre_ouvrage_entreprise = None
# Fax maitre ouvrage entreprise : id 5
fax_maitre_ouvrage_entreprise = None
# Courriel maitre ouvrage entreprise : id 6
courriel_maitre_ouvrage_entreprise = None
# Nom maitre ouvrage entreprise : id 1
nom_maitre_ouvrage_entreprise = None
# Section_maitre_ouvrage_dir_loc fouille : id 33
# Nom maitre ouvrage direction locale : id 106
nom_maitre_ouvrage_dir_loc = None
# Prenom maitre ouvrage direction locale : id 107
prenom_maitre_ouvrage_dir_loc = None
# Mobile maitre ouvrage direction locale : id 108
mobile_maitre_ouvrage_dir_loc = None
# Telephone maitre ouvrage direction locale : id 109
telephone_maitre_ouvrage_dir_loc = None
# Fax maitre ouvrage direction locale : id 110
fax_maitre_ouvrage_dir_loc = None
# Courriel maitre ouvrage direction locale : id 111
courriel_maitre_ouvrage_dir_loc = None
# Section_entrepreneur_entreprise fouille : id 3
# Nom / Raison sociale entrepreneur entreprise : id 1
nom_entrepreneur_entreprise = None
# Rue et numéro sociale entrepreneur entreprise : id 2
rue_entrepreneur_entreprise = None
# NPA et localité entrepreneur entreprise : id 3
npa_localite_entrepreneur_entreprise = None
# N° de téléphonee entrepreneur entreprise : id 4
telephone_entrepreneur_entreprise = None
# N° de fax entrepreneur entreprise : id 5
fax_entrepreneur_entreprise = None
# Courriel entrepreneur entreprise : id 6
courriel_entrepreneur_entreprise = None
# Section_entrepreneur_resp_travaux fouille : id 34
# Nom entrepreneur responsable travaux : id 112
nom_entrepreneur_responsable_travaux = None
# Prenom entrepreneur responsable travaux : id 113
prenom_entrepreneur_responsable_travaux = None
# Mobile entrepreneur responsable travaux : id 114
mobile_entrepreneur_responsable_travaux = None
# Telephone entrepreneur responsable travaux : id 115
telephone_entrepreneur_responsable_travaux = None
# Fax entrepreneur responsable travaux : id 116
fax_entrepreneur_responsable_travaux = None
# Courriel entrepreneur responsable travaux : id 117
courriel_entrepreneur_responsable_travaux = None
# # section_facturation fouille : id 4
# Adresse facturation : id 10
adresse_facturation = None
# section_loc_periode fouille : id 5
# Num bien-fonds : id 97
num_bien_fonds = None
# Commune : id 12
commune = None
# Coordonnee X : id 94
coordonnee_x = None
# Coordonnee Y : id 95
coordonnee_y = None
# Geometry collection : id 223
geometry_collection = None
# Cadastre : id 96
cadastre = None
# Lieu dit : id 99
lieu_dit = None
# Service à appliquer : id 93
service_a_appliquer = None
form = dossier['forms']['form'] if 'forms' in dossier and 'form' in dossier['forms'] else None
if form and len(form) > 0:
"""------------- (1) Form evenement ---------------"""
evenement_form = [f for f in form if f['@id'] == '1']
evenement_form = evenement_form[0] if len(evenement_form) > 0 else None
if evenement_form:
section = evenement_form['sections']['section'] \
if 'sections' in evenement_form and 'section' in evenement_form['sections'] else None
if section and len(section) > 0:
"""( 1.1) Info evenement """
# date_debut, heure_debut, date_fin, heure_fin, surface ....
section_evenement = [sec for sec in section if sec['@id'] == '44']
section_evenement = section_evenement[0] if len(section_evenement) > 0 else None
evenement_question = section_evenement['questions']['question'] \
if section_evenement and 'questions' in section_evenement and \
'question' in section_evenement['questions'] else None
if not evenement_question:
return False
for one_evenement_question in evenement_question:
id = one_evenement_question['@id']
# date_debut
if id == '121':
date_debut = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
date_debut = date_debut.replace(".", "-") if date_debut else None
if date_debut is None:
log.error('date_debut is null', exc_info=True)
return False
# heure_debut
elif id == '122':
heure_debut = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
if heure_debut is None:
log.error('heure_debut is null')
return False
heure_debut = heure_debut.lower().replace(".", ":").replace("h", ":")
heure_debut = heure_debut + ":00" if len(heure_debut) <= 2 else heure_debut
heure_debut = "0" + heure_debut if len(heure_debut) == 3 else heure_debut
# date_fin
elif id == '123':
date_fin = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
date_fin = date_fin.replace(".", "-") if date_fin else None
# heure_fin
elif id == '124':
heure_fin = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
if heure_fin:
heure_fin = heure_fin.lower().replace(".", ":").replace("h", ":")
heure_fin = heure_fin + ":00" if len(heure_fin) <= 2 else heure_fin
heure_fin = "0" + heure_fin if len(heure_fin) == 3 else heure_fin
# cause_fouille
elif id == '134':
cause_fouille = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# description_fouille
elif id == '135':
description_fouille = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
# adresse_fouille
elif id == '201':
adresse_fouille = one_evenement_question['answers']['answer']['value'] \
if 'answers' in one_evenement_question and \
'answer' in one_evenement_question['answers'] and \
'value' in one_evenement_question['answers']['answer'] else None
"""( 1.2) Reque©rant - Entreprise / Service / Commune """
section_requerant_entreprise = [sec for sec in section if sec['@id'] == '1']
section_requerant_entreprise = section_requerant_entreprise[0]\
if len(section_requerant_entreprise) > 0 else None
requerant_entreprise_question = section_requerant_entreprise['questions']['question'] \
if section_requerant_entreprise and 'questions' in section_requerant_entreprise and \
'question' in section_requerant_entreprise['questions'] else None
for one_requerant_entreprise_question in requerant_entreprise_question:
id = one_requerant_entreprise_question['@id']
# nom_requerant_entreprise
if id == '1':
nom_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# rue_requerant_entreprise
elif id == '2':
rue_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# localite_requerant_entreprise
elif id == '3':
localite_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# telephone_requerant_entreprise
elif id == '4':
telephone_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# fax_requerant_entreprise
elif id == '5':
fax_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
# courriel_requerant_entreprise
elif id == '6':
courriel_requerant_entreprise = \
one_requerant_entreprise_question['answers']['answer']['value'] \
if 'answers' in one_requerant_entreprise_question and \
'answer' in one_requerant_entreprise_question['answers'] and \
'value' in one_requerant_entreprise_question['answers']['answer'] else None
"""( 1.3) Reque©rant - Personne de contact """
section_requerant_personne = [sec for sec in section if sec['@id'] == '32']
section_requerant_personne = section_requerant_personne[0]\
if len(section_requerant_personne) > 0 else None
requerant_personne_question = section_requerant_personne['questions']['question'] \
if section_requerant_personne and 'questions' in section_requerant_personne and \
'question' in section_requerant_personne['questions'] else None
for one_requerant_personne_question in requerant_personne_question:
id = one_requerant_personne_question['@id']
# nom_requerant_personne
if id == '100':
nom_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# prenom_requerant_personne
elif id == '101':
prenom_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# mobile_requerant_personne
elif id == '102':
mobile_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# telephone_requerant_personne
elif id == '103':
telephone_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# fax_requerant_personne
elif id == '104':
fax_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
# courriel_requerant_personne
elif id == '105':
courriel_requerant_personne = \
one_requerant_personne_question['answers']['answer']['value'] \
if 'answers' in one_requerant_personne_question and \
'answer' in one_requerant_personne_question['answers'] and \
'value' in one_requerant_personne_question['answers']['answer'] else None
"""( 1.4) Maitre de l'ouvrage ou mandataire - Entreprise / Bureau d'ingénieur """
section_maitre_ouvrage_entreprise = [sec for sec in section if sec['@id'] == '2']
section_maitre_ouvrage_entreprise = section_maitre_ouvrage_entreprise[0]\
if len(section_maitre_ouvrage_entreprise) > 0 else None
maitre_ouvrage_entreprise_question = \
section_maitre_ouvrage_entreprise['questions']['question'] \
if section_maitre_ouvrage_entreprise and \
'questions' in section_requerant_entreprise and \
'question' in section_maitre_ouvrage_entreprise['questions'] else None
for one_maitre_ouvrage_entreprise_question in maitre_ouvrage_entreprise_question:
id = one_maitre_ouvrage_entreprise_question['@id']
# nom_maitre_ouvrage_entreprise
if id == '1':
nom_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
# rue_maitre_ouvrage_entreprise
elif id == '2':
rue_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
# localite_maitre_ouvrage_entreprise
elif id == '3':
localite_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
# telephone_maitre_ouvrage_entreprise
elif id == '4':
telephone_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
# fax_maitre_ouvrage_entreprise
elif id == '5':
fax_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
# courriel_maitre_ouvrage_entreprise
elif id == '6':
courriel_maitre_ouvrage_entreprise = \
one_maitre_ouvrage_entreprise_question['answers']['answer']['value']\
if 'answers' in one_maitre_ouvrage_entreprise_question \
and 'answer' in one_maitre_ouvrage_entreprise_question['answers']\
and 'value' in one_maitre_ouvrage_entreprise_question[
'answers']['answer'] else None
"""( 1.5) Maitre de l'ouvrage ou mandataire - Direction locale """
section_maitre_ouvrage_dir_loc = [sec for sec in section if sec['@id'] == '33']
section_maitre_ouvrage_dir_loc = section_maitre_ouvrage_dir_loc[0]\
if len(section_maitre_ouvrage_dir_loc) > 0 else None
maitre_ouvrage_question_dir_loc = \
section_maitre_ouvrage_dir_loc['questions']['question'] \
if section_maitre_ouvrage_dir_loc and \
'questions' in section_maitre_ouvrage_dir_loc and \
'question' in section_maitre_ouvrage_dir_loc['questions'] else None
for one_maitre_ouvrage_question_dir_loc in maitre_ouvrage_question_dir_loc:
id = one_maitre_ouvrage_question_dir_loc['@id']
# nom_maitre_ouvrage_dir_loc
if id == '106':
nom_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# prenom_maitre_ouvrage_dir_loc
elif id == '107':
prenom_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# mobile_maitre_ouvrage_dir_loc
elif id == '108':
mobile_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# telephone_maitre_ouvrage_dir_loc
elif id == '109':
telephone_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# fax_maitre_ouvrage_dir_loc
elif id == '110':
fax_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
# courriel_maitre_ouvrage_dir_loc
elif id == '111':
courriel_maitre_ouvrage_dir_loc = \
one_maitre_ouvrage_question_dir_loc['answers']['answer']['value'] \
if 'answers' in one_maitre_ouvrage_question_dir_loc and \
'answer' in one_maitre_ouvrage_question_dir_loc['answers'] and \
'value' in one_maitre_ouvrage_question_dir_loc['answers']['answer'] else None
"""( 1.6) Entrepreneur agréé - Entreprise """
section_entrepreneur_entreprise = [sec for sec in section if sec['@id'] == '3']
section_entrepreneur_entreprise = section_entrepreneur_entreprise[0]\
if len(section_entrepreneur_entreprise) > 0 else None
entrepreneur_entreprise_question = \
section_entrepreneur_entreprise['questions']['question']\
if section_entrepreneur_entreprise and 'questions' in section_entrepreneur_entreprise \
and 'question' in section_entrepreneur_entreprise['questions'] else None
for one_entrepreneur_entreprise_question in entrepreneur_entreprise_question:
id = one_entrepreneur_entreprise_question['@id']
# nom_entrepreneur_entreprise
if id == '1':
nom_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
# rue_entrepreneur_entreprise
if id == '2':
rue_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
# npa_localite_entrepreneur_entreprise
if id == '3':
npa_localite_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
# telephone_entrepreneur_entreprise
if id == '4':
telephone_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
# fax_entrepreneur_entreprise
if id == '5':
fax_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
# courriel_entrepreneur_entreprise
if id == '6':
courriel_entrepreneur_entreprise = \
one_entrepreneur_entreprise_question['answers']['answer']['value']\
if 'answers' in one_entrepreneur_entreprise_question and \
'answer' in one_entrepreneur_entreprise_question['answers'] and \
'value' in one_entrepreneur_entreprise_question['answers']['answer'] else None
"""( 1.7) Entrepreneur agréé - Responsable des travaux """
section_entrepreneur_resp_travaux = [sec for sec in section if sec['@id'] == '34']
section_entrepreneur_resp_travaux = section_entrepreneur_resp_travaux[0]\
if len(section_entrepreneur_resp_travaux) > 0 else None
section_entrepreneur_resp_travaux_question = \
section_entrepreneur_resp_travaux['questions']['question']\
if section_entrepreneur_resp_travaux and \
'questions' in section_entrepreneur_resp_travaux and \
'question' in section_entrepreneur_resp_travaux['questions'] else None
for one_section_entrepreneur_resp_travaux_question \
in section_entrepreneur_resp_travaux_question:
id = one_section_entrepreneur_resp_travaux_question['@id']
# nom_entrepreneur_responsable_travaux
if id == '112':
nom_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
# prenom_entrepreneur_responsable_travaux
if id == '113':
prenom_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
# mobile_entrepreneur_responsable_travaux
if id == '114':
mobile_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
# telephone_entrepreneur_responsable_travaux
if id == '115':
telephone_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
# fax_entrepreneur_responsable_travaux
if id == '116':
fax_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
# courriel_entrepreneur_responsable_travaux
if id == '117':
courriel_entrepreneur_responsable_travaux = \
one_section_entrepreneur_resp_travaux_question['answers']['answer']['value']\
if 'answers' in one_section_entrepreneur_resp_travaux_question \
and 'answer' in one_section_entrepreneur_resp_travaux_question['answers']\
and 'value' in one_section_entrepreneur_resp_travaux_question[
'answers']['answer'] else None
"""( 1.8) Facturation """
section_facturation = [sec for sec in section if sec['@id'] == '4']
section_facturation = section_facturation[0] if len(section_facturation) > 0 else None
facturation_question = section_facturation['questions']['question']\
if section_facturation and 'questions' in section_facturation \
and 'question' in section_facturation['questions'] else None
if section_facturation:
id = section_facturation['@id']
if id == '4':
adresse_facturation = section_facturation['answers']['answer']['value']\
if 'answers' in section_facturation \
and 'answer' in section_facturation['answers']\
and 'value' in section_facturation['answers']['answer'] else None
"""( 1.9) Localisation et période """
section_loc_periode = [sec for sec in section if sec['@id'] == '5']
section_loc_periode = section_loc_periode[0] if len(section_loc_periode) > 0 else None
loc_periode_question = section_loc_periode['questions']['question']\
if section_loc_periode and 'questions' in section_loc_periode and \
'question' in section_loc_periode['questions'] else None
for one_loc_periode_question in loc_periode_question:
id = one_loc_periode_question['@id']
# commune
if id == '12':
commune = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# service_a_appliquer
elif id == '93':
service_a_appliquer = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# coordonnee_x
elif id == '94':
coordonnee_x = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# coordonnee_y
elif id == '95':
coordonnee_y = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# cadastre
elif id == '96':
cadastre = one_loc_periode_question['answers']['answer']['value'] \
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# num_bien_fonds
elif id == '97':
num_bien_fonds = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# lieu_dit
elif id == '99':
lieu_dit = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# geometry_collection 223
elif id == '223':
geometry_collection = one_loc_periode_question['answers']['answer']['value']\
if 'answers' in one_loc_periode_question and \
'answer' in one_loc_periode_question['answers'] and \
'value' in one_loc_periode_question['answers']['answer'] else None
# Evenement model
evenement_model = models.Evenement(
id_entite=cls.settings['id_entite_xml_import'],
id_responsable=cls.settings['id_responsable_xml_import'],
type=int(cls.settings['fouille_evenement_id']),
libelle=title,
description=description_fouille,
ref_camac=ref_camac,
date_debut=datetime.datetime.strptime(date_debut, cls.settings['xml_date_template'])
if date_debut else None,
heure_debut=heure_debut,
date_fin=datetime.datetime.strptime(date_fin, cls.settings['xml_date_template'])
if date_fin else None,
heure_fin=heure_fin,
nom_requerant=nom_requerant_entreprise,
rue_requerant=rue_requerant_entreprise,
localite_requerant=localite_requerant_entreprise,
telephone_requerant=telephone_requerant_entreprise,
fax_requerant=fax_requerant_entreprise,
courriel_requerant=courriel_requerant_entreprise,
nom_contact=nom_requerant_personne,
prenom_contact=prenom_requerant_personne,
mobile_contact=mobile_requerant_personne,
telephone_contact=telephone_requerant_personne,
fax_contact=fax_requerant_personne,
courriel_contact=courriel_requerant_personne,
id_utilisateur_ajout=cls.settings['id_user_ajout_xml_import'],
id_utilisateur_modification=cls.settings['id_user_ajout_xml_import'],
localisation=commune,
numero_dossier=Utils.generate_numero_dossier(cls.request, int(cls.settings[
'fouille_evenement_id']))
)
if evenement_model:
cls.request.dbsession.add(evenement_model)
cls.request.dbsession.flush()
max_event_id = evenement_model.id
# Fouille model
fouille_model = models.Fouille(
id_evenement=max_event_id,
nom_maitre_ouvrage=nom_maitre_ouvrage_entreprise,
rue_maitre_ouvrage=rue_maitre_ouvrage_entreprise,
localite_maitre_ouvrage=localite_maitre_ouvrage_entreprise,
telephone_maitre_ouvrage=telephone_maitre_ouvrage_entreprise,
fax_maitre_ouvrage=fax_maitre_ouvrage_entreprise,
courriel_maitre_ouvrage=courriel_maitre_ouvrage_entreprise,
nom_direction_locale=nom_maitre_ouvrage_dir_loc,
prenom_direction_locale=prenom_maitre_ouvrage_dir_loc,
mobile_direction_locale=mobile_maitre_ouvrage_dir_loc,
telephone_direction_locale=telephone_maitre_ouvrage_dir_loc,
fax_direction_locale=fax_maitre_ouvrage_dir_loc,
courriel_direction_locale=courriel_maitre_ouvrage_dir_loc,
nom_entrepreneur=nom_entrepreneur_entreprise,
rue_entrepreneur=rue_entrepreneur_entreprise,
localite_entrepreneur=npa_localite_entrepreneur_entreprise,
telephone_entrepreneur=telephone_entrepreneur_entreprise,
fax_entrepreneur=fax_entrepreneur_entreprise,
courriel_entrepreneur=courriel_entrepreneur_entreprise,
nom_responsable_travaux=nom_entrepreneur_responsable_travaux,
prenom_responsable_travaux=prenom_entrepreneur_responsable_travaux,
mobile_responsable_travaux=mobile_entrepreneur_responsable_travaux,
telephone_responsable_travaux=telephone_entrepreneur_responsable_travaux,
fax_responsable_travaux=fax_entrepreneur_responsable_travaux,
courriel_responsable_travaux=courriel_entrepreneur_responsable_travaux
)
cls.request.dbsession.add(fouille_model)
# Evenement point model
if geometry_collection is None:
geometry = json.loads('{"type":"Point","coordinates":['
+ coordonnee_x + ',' + coordonnee_y + ']}')
evenement_point_model = models.EvenementPoint(id_evenement=max_event_id)
evenement_point_model.set_json_geometry(str(geometry), cls.settings['srid'])
pass
# cls.request.dbsession.add(evenement_point_model)
"""
# Evenement polygon model
if geometry_collection is not None:
evenement_polygon_model = models.EvenementPolygone(id_evenement=max_event_id)
evenement_polygon_model.set_geometry_collection(str(geometry_collection),
cls.settings['srid'])
cls.request.dbsession.add(evenement_polygon_model)
"""
Utils.add_ev_geometries(cls.request, geometry_collection, max_event_id)
"""------------- (2) Form perturbation ---------------"""
perturbations_form = [f for f in form if f['@id'] == '41']
perturbations_form = perturbations_form[0] if len(perturbations_form) > 0 else None
if perturbations_form:
# Perturbations
perturbations_values = {}
section = perturbations_form['sections']['section'] if perturbations_form and 'sections' \
in perturbations_form and 'section' in perturbations_form['sections'] else None
if section and len(section) > 0:
"""( 2.1) Type perturbation """
section_type_perturbation = [sec for sec in section if sec['@id'] == '42']
section_type_perturbation = section_type_perturbation[0] if len(
section_type_perturbation) > 0 else None
type_perturbation_question = \
section_type_perturbation['questions']['question'] if section_type_perturbation \
and 'questions' in section_type_perturbation \
and 'question' in section_type_perturbation['questions'] else None
if section_type_perturbation:
id = section_type_perturbation['@id']
# type_pertubation
if id == '42':
item_id = type_perturbation_question['answers']['answer']['@item']
perturbations_values[item_id] = {}
type_pertubation = \
type_perturbation_question['answers']['answer']['value'] \
if 'answers' in type_perturbation_question and \
'answer' in type_perturbation_question['answers'] and \
'value' in type_perturbation_question['answers']['answer'] else None
if type_pertubation == "Occupation":
type_pertubation = int(
cls.settings['occupation_perturbation_id'])
elif type_pertubation == "Fermeture":
type_pertubation = int(
cls.settings['fermeture_perturbation_id'])
perturbations_values[item_id]['type_pertubation'] = type_pertubation
# Other perturbations types
section_type_other_perturbation = [sec for sec in section if sec['@id'] == '45']
section_type_other_perturbation = section_type_other_perturbation[0] \
if section_type_other_perturbation and \
len(section_type_other_perturbation) > 0 else section_type_other_perturbation
if section_type_other_perturbation:
type_other_perturbation_question = section_type_other_perturbation[
'questions']['question'] if section_type_other_perturbation and \
'questions' in section_type_other_perturbation and \
'question' in section_type_other_perturbation['questions'] else None
other_perturb_types_answers = cls.get_answers(type_other_perturbation_question)
for one_item_id in other_perturb_types_answers:
one_type_pertubation = other_perturb_types_answers[one_item_id]
if one_item_id and one_type_pertubation:
one_item_id = int(one_item_id)
perturbations_values[str(one_item_id + 1)] = {}
if one_type_pertubation == "Occupation":
perturbations_values[str(one_item_id + 1)]['type_pertubation'] = int(
cls.settings['occupation_perturbation_id'])
elif one_type_pertubation == "Fermeture":
perturbations_values[str(one_item_id + 1)]['type_pertubation'] = int(
cls.settings['fermeture_perturbation_id'])
"""(2.2) Occupation"""
section_occupation = [sec for sec in section if sec['@id'] == '41']
section_occupation = section_occupation[0] if len(section_occupation) > 0 else None
# Set occupation values
if section_occupation:
cls.set_ocuppations_values(section_occupation, perturbations_values)
"""(2.2) Fermeture"""
section_fermeture = [sec for sec in section if sec['@id'] == '43']
section_fermeture = section_fermeture[0] if len(section_fermeture) > 0 else None
# Set fermetures values
if section_fermeture:
cls.set_fermetures_values(section_fermeture, perturbations_values)
for one_perturb_item_id in perturbations_values:
one_perturb_item = perturbations_values[one_perturb_item_id]
if 'type_pertubation' in one_perturb_item:
type_pertubation = one_perturb_item['type_pertubation']
# Check date_debut, if less than 24h, urgence=true
urgence = False
date_debut = one_perturb_item['date_debut'] \
if 'date_debut' in one_perturb_item else None
heure_debut = one_perturb_item['heure_debut'] \
if 'heure_debut' in one_perturb_item else None
date_fin = one_perturb_item['date_fin'] \
if 'date_fin' in one_perturb_item else None
heure_fin = one_perturb_item['heure_fin'] \
if 'heure_fin' in one_perturb_item else None
if date_debut is not None and heure_debut is not None:
date_time_str = str(date_debut) + ' ' + str(heure_debut)
date_time_obj = datetime.datetime.strptime(date_time_str, cls.settings[
'xml_date_template'] + ' ' + cls.settings['xml_heure_template'])
now = datetime.datetime.now()
if date_time_obj >= now and date_time_obj <= now + datetime.timedelta(days=1):
urgence = True
perturbation_model = models.Perturbation(
id_evenement=max_event_id,
type=type_pertubation,
# description=description,
date_debut=datetime.datetime.strptime(date_debut, cls.settings[
'xml_date_template']) if date_debut else None,
heure_debut=heure_debut,
date_fin=datetime.datetime.strptime(date_fin, cls.settings[
'xml_date_template']) if date_fin else None,
heure_fin=heure_fin,
nom_responsable_trafic=one_perturb_item['nom']
if 'nom' in one_perturb_item else None,
prenom_responsable_trafic=one_perturb_item['prenom']
if 'prenom' in one_perturb_item else None,
mobile_responsable_trafic=one_perturb_item['mobile']
if 'mobile' in one_perturb_item else None,
telephone_responsable_trafic=one_perturb_item['telephone']
if 'telephone' in one_perturb_item else None,
# fax_responsable_trafic=fax,
courriel_responsable_trafic=one_perturb_item['courriel']
if 'courriel' in one_perturb_item else None,
remarque=one_perturb_item['remarque']
if 'remarque' in one_perturb_item else None,
id_utilisateur_ajout=cls.settings['id_user_ajout_xml_import'],
id_utilisateur_modification=cls.settings['id_user_ajout_xml_import'],
urgence=urgence,
etat=cls.settings['perturbation_etat_acceptee_code']
if urgence is True else cls.settings['perturbation_etat_attente_code']
)
if perturbation_model:
cls.request.dbsession.add(perturbation_model)
cls.request.dbsession.flush()
max_perturbation_id = perturbation_model.id
# Occupation
if type_pertubation == int(
cls.settings['occupation_perturbation_id']) and section_fermeture:
occupation_model = models.Occupation(
id_perturbation=max_perturbation_id,
id_responsable_regulation=cls.settings['id_responsable_xml_import'],
type_occupation=one_perturb_item['type_occupation']
if 'type_occupation' in one_perturb_item else None,
type_regulation=one_perturb_item['type_regulation']
if 'type_regulation' in one_perturb_item else None,
voies_condamnees=one_perturb_item['voies_condamnees']
if 'voies_condamnees' in one_perturb_item else None,
largeur_gabarit=one_perturb_item['largeur_gabarit']
if 'largeur_gabarit' in one_perturb_item else None,
hauteur_gabarit=one_perturb_item['hauteur_gabarit']
if 'hauteur_gabarit' in one_perturb_item else None,
heure_pointe=one_perturb_item['occupation_heures_pointes']
if 'occupation_heures_pointes' in one_perturb_item else None,
week_end=one_perturb_item['occupation_weekend']
if '' in one_perturb_item else None)
cls.request.dbsession.add(occupation_model)
# Fermeture
elif type_pertubation == int(
cls.settings['fermeture_perturbation_id']) and section_fermeture:
fermeture_model = models.Fermeture(
id_perturbation=max_perturbation_id,
deviation=one_perturb_item['deviation']
if 'deviation' in one_perturb_item else None,
id_responsable=cls.settings['id_responsable_xml_import'])
cls.request.dbsession.add(fermeture_model)
# Geometries
Utils.add_perturb_geometries(cls.request, geometry_collection, max_perturbation_id)
# flush to raise exception in case of ionvalid data
log.debug("Debug_GL: add_file_data_fouille: Tentative de flush final pour ce fichier")
cls.request.dbsession.flush()
log.debug("Debug_GL: add_file_data_fouille: Flush effectué")
except Exception as e:
log.error(str(e))
log.debug("Debug_GL: add_file_data_fouille: Exception -> Tentative de rollback ")
cls.request.dbsession.rollback()
log.debug("Debug_GL: add_file_data_fouille: Exception -> Rollback effectué")
# raise e
log.debug("Debug_GL: add_file_data_fouille: Exception -> return False")
return False
return True
@classmethod
def get_answers(cls, question):
    """Extract the answers of a form question as an ``{item_id: value}`` dict.

    The XML-to-dict conversion yields a list under ``answers.answer`` when a
    question has several answers and a plain dict when it has exactly one;
    both shapes are handled.  Returns an empty dict when the question has no
    ``answers``/``answer`` structure at all (the previous version raised a
    KeyError in that case by re-indexing the missing keys).
    """
    items = {}
    answers = question['answers']['answer'] \
        if 'answers' in question and 'answer' in question['answers'] else None
    if answers is None:
        # No answer present: nothing to collect.
        return items
    if isinstance(answers, list):
        # Several answers.
        for answer in answers:
            items[answer['@item']] = answer['value']
    else:
        # Single answer: reuse the node already extracted above instead of
        # indexing question['answers']['answer'] a second time.
        items[answers['@item']] = answers['value']
    return items
@classmethod
def set_ocuppations_values(cls, section, perturb_obj):
    """Fill *perturb_obj* with the occupation values found in *section*.

    Each question id of the occupation section maps to one perturbation
    field; answers are keyed by their ``@item`` id, which selects the entry
    of *perturb_obj* to update.  Empty answers are ignored.  A missing or
    single-dict ``questions.question`` node (xmltodict yields a dict for a
    single element) is handled instead of crashing.
    """
    def parse_oui_non(value):
        # "OUI" -> True, "NON" -> False, anything else -> None.
        upper = value.upper()
        if upper == "OUI":
            return True
        if upper == "NON":
            return False
        return None

    def parse_date(value):
        # XML dates use dots (dd.mm.yyyy); downstream expects dashes.
        return value.replace(".", "-")

    def parse_heure(value):
        # Normalise "8h30"/"8.30"/"8" style times towards "hh:mm".
        heure = value.lower().replace(".", ":").replace("h", ":")
        if len(heure) <= 2:
            heure = heure + ":00"
        if len(heure) == 3:
            heure = "0" + heure
        return heure

    # Question id -> (perturbation field, optional converter).
    field_map = {
        '20': ('occupation_heures_pointes', parse_oui_non),
        '21': ('occupation_weekend', parse_oui_non),
        '24': ('remarque', None),
        '100': ('nom', None),
        '101': ('prenom', None),
        '102': ('mobile', None),
        '103': ('telephone', None),
        '105': ('courriel', None),
        '121': ('date_debut', parse_date),
        '122': ('heure_debut', parse_heure),
        '123': ('date_fin', parse_date),
        '124': ('heure_fin', parse_heure),
        '126': ('periode_occupation', None),
        '127': ('type_occupation', None),
        '128': ('voies_condamnees', None),
        '129': ('type_regulation', None),
        '130': ('hauteur_gabarit', None),
        '131': ('largeur_gabarit', None),
    }
    questions = section['questions']['question'] if section and 'questions' in section \
        and 'question' in section['questions'] else None
    if not questions:
        # Nothing to process (the previous version iterated None and crashed).
        return
    if isinstance(questions, dict):
        # Single question node: wrap so the loop below works uniformly.
        questions = [questions]
    for one_question in questions:
        question_id = one_question['@id']
        if question_id not in field_map:
            continue
        key, converter = field_map[question_id]
        for item_id, raw_value in cls.get_answers(one_question).items():
            # Skip empty/missing answers, as before.
            if raw_value is None or raw_value == '':
                continue
            perturb_obj[item_id][key] = converter(raw_value) if converter else raw_value
@classmethod
def set_fermetures_values(cls, section, perturb_obj):
    """Fill *perturb_obj* with the fermeture (closure) values of *section*.

    Mirrors ``set_ocuppations_values`` for the closure-specific question ids
    ('132' type_fermeture, '133' deviation) plus the shared contact and
    date/time fields.  Empty answers are ignored; a missing or single-dict
    ``questions.question`` node is handled instead of crashing.
    """
    def parse_date(value):
        # XML dates use dots (dd.mm.yyyy); downstream expects dashes.
        return value.replace(".", "-")

    def parse_heure(value):
        # Normalise "8h30"/"8.30"/"8" style times towards "hh:mm".
        heure = value.lower().replace(".", ":").replace("h", ":")
        if len(heure) <= 2:
            heure = heure + ":00"
        if len(heure) == 3:
            heure = "0" + heure
        return heure

    # Question id -> (perturbation field, optional converter).
    field_map = {
        '24': ('remarque', None),
        '100': ('nom', None),
        '101': ('prenom', None),
        '102': ('mobile', None),
        '103': ('telephone', None),
        '105': ('courriel', None),
        '121': ('date_debut', parse_date),
        '122': ('heure_debut', parse_heure),
        '123': ('date_fin', parse_date),
        '124': ('heure_fin', parse_heure),
        '132': ('type_fermeture', None),
        '133': ('deviation', None),
    }
    questions = section['questions']['question'] if section and 'questions' in section \
        and 'question' in section['questions'] else None
    if not questions:
        # Nothing to process (the previous version iterated None and crashed).
        return
    if isinstance(questions, dict):
        # Single question node: wrap so the loop below works uniformly.
        questions = [questions]
    for one_question in questions:
        question_id = one_question['@id']
        if question_id not in field_map:
            continue
        key, converter = field_map[question_id]
        for item_id, raw_value in cls.get_answers(one_question).items():
            # Skip empty/missing answers, as before.
            if raw_value is None or raw_value == '':
                continue
            perturb_obj[item_id][key] = converter(raw_value) if converter else raw_value
| 64.057194
| 127
| 0.453432
| 11,292
| 143,360
| 5.401346
| 0.031881
| 0.025987
| 0.058877
| 0.031807
| 0.932221
| 0.909349
| 0.893937
| 0.891969
| 0.890543
| 0.885723
| 0
| 0.010157
| 0.484263
| 143,360
| 2,237
| 128
| 64.085829
| 0.814664
| 0.052748
| 0
| 0.798658
| 0
| 0
| 0.083741
| 0.010727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007383
| false
| 0.001342
| 0.016779
| 0
| 0.041611
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f84fac38ebbc1496fa00dc3b49b73379c92020e8
| 7,253
|
py
|
Python
|
data.py
|
ZolileN/SMSBot
|
7a0573eb0481412f6e5cd13ccec769fd42cc6ec7
|
[
"MIT"
] | null | null | null |
data.py
|
ZolileN/SMSBot
|
7a0573eb0481412f6e5cd13ccec769fd42cc6ec7
|
[
"MIT"
] | null | null | null |
data.py
|
ZolileN/SMSBot
|
7a0573eb0481412f6e5cd13ccec769fd42cc6ec7
|
[
"MIT"
] | null | null | null |
# Week-of-pregnancy -> recommended action message.  Consecutive weeks that
# share the same message are expressed once as a range and expanded below,
# so each text lives in a single place.
_PREG_MESSAGES = (
    (range(7, 10), "You need to have an ultrasound to determine the location of the pregnancy sac, size and vitality (heartbeat) of the fetus."),
    (range(10, 12), "You need to take the following test :Blood count, fasting glucose, urine culture, blood type (and RH antibodies level if negative), rubella antibodies, VDRL, CMV, antibodies, toxoplasma and HBsAG and if necessary an HIV test."),
    (range(12, 14), "Placental structure (chorionic placenta) and early chromosomal genetic diagnostic testing has to be done."),
    (range(14, 15), "You need have an ultrasound, nuchal translucency or fetal development test performed."),
    (range(15, 17), "Ultrasound screening exam needs to be done."),
    (range(17, 22), "Triple screen test, fetoprotein or amniocentesis and blood count has to be done."),
    (range(22, 26), "Late ultrasound anomaly scan has to be done."),
    (range(26, 28), "You need have glucose tolerance test, blood count and RH antibody levels for those with negative blood type."),
    (range(28, 31), "You need have a visit to the physician to discuss glucose test results and talk about receiving RH immune globulin if RH negative"),
    (range(31, 33), "Review of fetal growth has to be now!"),
    (range(33, 34), "Follow-up visit has to be done."),
    (range(34, 40), "Weekly tracking of fetal growth weight needs to be done."),
    (range(40, 41), "Follow-up with doctor, ultrasound, monitoring biophysical profile every 2-4 days should be done."),
)
pregdata = {week: message for weeks, message in _PREG_MESSAGES for week in weeks}
# Age-in-weeks -> due-vaccination message.  Weeks sharing the same message
# are grouped so every text appears exactly once.  (Spellings such as
# "Dipther ia"/"prevenion" are preserved verbatim from the source data.)
_BABY_MESSAGES = (
    ((0,), "Baby's dose for BCG vaccine for the prevention of TB and bladder cancer, first dose of HEPB vaccine for the prevention of Hepatitis B and first dose for POLIOVIRUS vaccine for the prevention of polio is due."),
    ((1, 2, 3, 5), "No vaccination is pending."),
    ((4,), "Baby's second dose of HEPB vaccine for the prevention of Hepatitis B and second dose of POLIOVIRUS vaccine for the prevenion of polio is due."),
    ((6, 7), "Baby's first dose of DTP vaccine for the prevention of Dipther ia, Tetanus and Pertussis, first dose of Hib vaccine for the prevention of infections, first dose of PCV vaccine for the prevention of Pneumonia, first dose of RV vaccine for the prevention of Diarrhea is due."),
    ((8, 9), "Baby's third dose of POLIOVIRUS for the prevention of polio is due."),
    ((10, 11), "Baby's second dose of DTP vaccine for the prevention of Dipther ia, Tetanus and Pertussis, second dose of Hib vaccine for the prevention of infections, second dose of PCV vaccine for the prevention of Pneumonia, second dose of RV vaccine for the prevention of Diarrhea is due."),
    ((12, 13), "Baby's third dose of HEPB vaccine for the prevention of Hepatitis B is due."),
    ((14, 15, 16), "Baby's third dose of DTP vaccine for the prevention of Dipther ia, Tetanus and Pertussis, third dose of Hib vaccine for the prevention of infections, third dose of PCV vaccine for the prevention of Pneumonia, third dose of RV vaccine for the prevention of Diarrhea is due."),
    ((36,), "Baby's first dose of TYPHOID vaccine for the prevention of Typhoid fever and Diarrhea and first dose of MMR vaccine for the prevention of Mumps and Rubella is due."),
    ((38,), "Baby's fourth dose of DTP vaccine for the prevention of Dipther ia, Tetanus and Pertussis, fourth dose of Hib vaccine for the prevention of infections, fourth dose of PCV vaccine for the prevention of Pneumonia is due."),
    ((52,), "Baby's first dose of Varicella vaccine for the prevention of Chicken pox and HepA vaccine for the prevention of the Liver disease."),
    ((55,), "Baby's second dose of MMR vaccine for the prevention of Measles, Mumps and Rubella and Varicella vaccine for the prevention of Chicken pox."),
    ((58,), "Baby's second dose of HepA for the prevention of Liver disease is due."),
    ((104,), "Baby's second dose of Typhoid vaccine for the prevention of Typhoid Fever and Diarrhea is due."),
    ((364,), "Baby's Tdap vaccine for the prevention of the Dipther ia, tetanus, and pertuassis is due."),
    ((468,), "Baby's dose of HPV vaccine for the prevention of cancer and warts is due."),
)
babydata = {week: message for weeks, message in _BABY_MESSAGES for week in weeks}
| 109.893939
| 286
| 0.751137
| 1,197
| 7,253
| 4.551378
| 0.164578
| 0.055066
| 0.143906
| 0.161894
| 0.867291
| 0.842878
| 0.832599
| 0.814244
| 0.748532
| 0.732562
| 0
| 0.018807
| 0.193575
| 7,253
| 65
| 287
| 111.584615
| 0.912635
| 0
| 0
| 0
| 0
| 0.333333
| 0.902523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3e21e0152d2dcfdb3e9b29ff0b8e71bc4ab437fb
| 748
|
py
|
Python
|
code_snippets/Gotchas.py
|
cclauss/py_regular_expressions
|
d5a1e01bbe11ebb32fbf56880fd9c3fb8d6f7d44
|
[
"MIT"
] | 1
|
2020-01-19T11:16:14.000Z
|
2020-01-19T11:16:14.000Z
|
code_snippets/Gotchas.py
|
cclauss/py_regular_expressions
|
d5a1e01bbe11ebb32fbf56880fd9c3fb8d6f7d44
|
[
"MIT"
] | null | null | null |
code_snippets/Gotchas.py
|
cclauss/py_regular_expressions
|
d5a1e01bbe11ebb32fbf56880fd9c3fb8d6f7d44
|
[
"MIT"
] | null | null | null |
# Regex "gotchas" demonstration snippets (meant to be evaluated one by one,
# e.g. in a REPL; several are intentionally surprising or raise).
# \t is a recognised escape (tab), so this matches the tab in the input.
bool(re.search(r'\t', 'cat\tdog'))
# \c is NOT a recognised escape — re raises an error ("bad escape") here.
bool(re.search(r'\c', 'cat\tdog'))
# With re.MULTILINE, ^ matches at the start of every line, including the
# empty final line after the trailing '\n'.
print(re.sub(r'(?m)^', r'foo ', '1\n2\n'))
# Likewise $ matches before every '\n' and at the very end.
print(re.sub(r'(?m)$', r' baz', '1\n2\n'))
# A *-quantified pattern can match an empty string, so empty matches appear
# between the delimiters as well as around the non-empty fields.
re.sub(r'[^,]*', r'{\g<0>}', ',cat,tiger')
# NOTE(review): uses the third-party `regex` module; *+ is a possessive
# quantifier, which suppresses the extra empty matches — confirm `regex`
# is imported where this runs.
regex.sub(r'[^,]*+', r'{\g<0>}', ',cat,tiger')
# Pure-`re` workaround: a lookbehind prevents matching immediately after a
# previous (possibly empty) field match.
re.sub(r'(?<![^,])[^,]*', r'{\g<0>}', ',cat,tiger')
# A repeated group keeps only its LAST iteration, so \1 here is just "3,".
re.sub(r'\A([^,]+,){3}([^,]+)', r'\1(\2)', '1,2,3,4,5,6,7', count=1)
# Wrapping the repetition in an outer group captures all three fields.
re.sub(r'\A((?:[^,]+,){3})([^,]+)', r'\1(\2)', '1,2,3,4,5,6,7', count=1)
# findall returns group captures when groups are present — only "3," here...
re.findall(r'([^,]+,){3}', '1,2,3,4,5,6,7')
# ...so use a non-capturing group to get the full matches.
re.findall(r'(?:[^,]+,){3}', '1,2,3,4,5,6,7')
# POSIX character classes like [[:word:]] are not supported by `re`
# (it parses this as a character set); the `regex` module supports them,
# and its own ASCII flag (regex.A) is needed for ASCII-only behaviour.
re.findall(r'[[:word:]]+', 'fox:αλεπού,eagle:αετός', flags=re.A)
regex.findall(r'[[:word:]]+', 'fox:αλεπού,eagle:αετός', flags=re.A)
regex.findall(r'[[:word:]]+', 'fox:αλεπού,eagle:αετός', flags=regex.A)
| 25.793103
| 72
| 0.470588
| 147
| 748
| 2.394558
| 0.238095
| 0.079545
| 0.102273
| 0.045455
| 0.809659
| 0.809659
| 0.735795
| 0.735795
| 0.6875
| 0.6875
| 0
| 0.065029
| 0.074866
| 748
| 28
| 73
| 26.714286
| 0.443642
| 0
| 0
| 0
| 0
| 0
| 0.477912
| 0.120482
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e757ddca93e6325f9658af1346c8a196ab32dba
| 6,821
|
py
|
Python
|
loldib/getratings/models/NA/na_katarina/na_katarina_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_katarina/na_katarina_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_katarina/na_katarina_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Per-champion matchup rating classes for NA Katarina (bot lane).  The
# original file spelled out ~140 identical empty `Ratings` subclasses; they
# are generated here in a loop and published under the exact same
# module-level names, so `from ... import NA_Katarina_Bot_Ahri` and
# `import *` keep working unchanged.
_CHAMPIONS = (
    "Aatrox", "Ahri", "Akali", "Alistar", "Amumu", "Anivia", "Annie",
    "Ashe", "AurelionSol", "Azir", "Bard", "Blitzcrank", "Brand", "Braum",
    "Caitlyn", "Camille", "Cassiopeia", "Chogath", "Corki", "Darius",
    "Diana", "Draven", "DrMundo", "Ekko", "Elise", "Evelynn", "Ezreal",
    "Fiddlesticks", "Fiora", "Fizz", "Galio", "Gangplank", "Garen", "Gnar",
    "Gragas", "Graves", "Hecarim", "Heimerdinger", "Illaoi", "Irelia",
    "Ivern", "Janna", "JarvanIV", "Jax", "Jayce", "Jhin", "Jinx",
    "Kalista", "Karma", "Karthus", "Kassadin", "Katarina", "Kayle", "Kayn",
    "Kennen", "Khazix", "Kindred", "Kled", "KogMaw", "Leblanc", "LeeSin",
    "Leona", "Lissandra", "Lucian", "Lulu", "Lux", "Malphite", "Malzahar",
    "Maokai", "MasterYi", "MissFortune", "MonkeyKing", "Mordekaiser",
    "Morgana", "Nami", "Nasus", "Nautilus", "Nidalee", "Nocturne", "Nunu",
    "Olaf", "Orianna", "Ornn", "Pantheon", "Poppy", "Quinn", "Rakan",
    "Rammus", "RekSai", "Renekton", "Rengar", "Riven", "Rumble", "Ryze",
    "Sejuani", "Shaco", "Shen", "Shyvana", "Singed", "Sion", "Sivir",
    "Skarner", "Sona", "Soraka", "Swain", "Syndra", "TahmKench", "Taliyah",
    "Talon", "Taric", "Teemo", "Thresh", "Tristana", "Trundle",
    "Tryndamere", "TwistedFate", "Twitch", "Udyr", "Urgot", "Varus",
    "Vayne", "Veigar", "Velkoz", "Vi", "Viktor", "Vladimir", "Volibear",
    "Warwick", "Xayah", "Xerath", "XinZhao", "Yasuo", "Yorick", "Zac",
    "Zed", "Ziggs", "Zilean", "Zyra",
)
for _champ in _CHAMPIONS:
    _name = "NA_Katarina_Bot_" + _champ
    # Empty subclass, identical to the original `class ...(Ratings): pass`.
    globals()[_name] = type(_name, (Ratings,), {})
del _champ, _name
| 16.357314
| 46
| 0.776133
| 972
| 6,821
| 5.020576
| 0.151235
| 0.197951
| 0.42418
| 0.509016
| 0.814139
| 0.814139
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162879
| 6,821
| 416
| 47
| 16.396635
| 0.854641
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
e43d844afd8af20a746eeb882e95f1030fa04a4f
| 20
|
py
|
Python
|
src/test/resources/expressions/slice/slice.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2019-11-23T10:19:43.000Z
|
2021-03-19T03:18:30.000Z
|
src/test/resources/expressions/slice/slice.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 46
|
2019-11-23T12:11:52.000Z
|
2022-03-07T13:39:12.000Z
|
src/test/resources/expressions/slice/slice.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2020-03-02T13:48:45.000Z
|
2020-03-06T09:33:25.000Z
|
# Chained slicing on a list literal: [1, 2][0:3:2] -> [1] (step 2 from index
# 0), [:] copies it, [0] -> 1.  NOTE(review): this lives under test
# resources of a parser project, so it is presumably a syntax fixture.
[1, 2][0:3:2][:][0]
| 10
| 19
| 0.3
| 6
| 20
| 1
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.1
| 20
| 1
| 20
| 20
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e44ee4a1af9468bb5f5ad151025aa87c332b5486
| 317
|
py
|
Python
|
bin/common.py
|
roim/sasha
|
c4f6eee02394fe0923c431b58cfadff15c140be8
|
[
"BSD-3-Clause"
] | 2
|
2015-01-28T10:08:40.000Z
|
2015-02-19T06:24:14.000Z
|
bin/common.py
|
roim/sasha
|
c4f6eee02394fe0923c431b58cfadff15c140be8
|
[
"BSD-3-Clause"
] | null | null | null |
bin/common.py
|
roim/sasha
|
c4f6eee02394fe0923c431b58cfadff15c140be8
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
def executeCommand(command):
    """Run *command* through the shell and block until it completes.

    Returns the ``(stdout, stderr)`` bytes tuple from
    ``Popen.communicate()``.

    SECURITY NOTE: the command is executed with ``shell=True``; never
    pass untrusted input to this function.
    """
    # Pass the command string directly: wrapping it in a one-element
    # list together with shell=True only happens to work on POSIX and
    # breaks on Windows, where additional list items become /bin/sh
    # arguments rather than part of the command.
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    return proc.communicate()
def executeParallelCommand(command):
    """Start *command* through the shell and return immediately.

    Returns the live ``subprocess.Popen`` object; the caller is
    responsible for calling ``communicate()``/``wait()`` on it.

    SECURITY NOTE: executed with ``shell=True``; never pass untrusted
    input.
    """
    # Same fix as executeCommand: pass the string, not a one-element
    # list, so behavior is correct on Windows as well as POSIX.
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    return proc
| 35.222222
| 95
| 0.798107
| 37
| 317
| 6.837838
| 0.405405
| 0.221344
| 0.166008
| 0.205534
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0
| 0
| 0.085174
| 317
| 9
| 96
| 35.222222
| 0.872414
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e4cf51d8cd75705df051cd961055dcbc1d53ebcd
| 514
|
py
|
Python
|
exrec/evaluation/metrics/rating.py
|
InkToYou/Ex-Rec
|
25cfc74604502b3d59abc44b586d1cb7f4a343bd
|
[
"MIT"
] | null | null | null |
exrec/evaluation/metrics/rating.py
|
InkToYou/Ex-Rec
|
25cfc74604502b3d59abc44b586d1cb7f4a343bd
|
[
"MIT"
] | null | null | null |
exrec/evaluation/metrics/rating.py
|
InkToYou/Ex-Rec
|
25cfc74604502b3d59abc44b586d1cb7f4a343bd
|
[
"MIT"
] | null | null | null |
from typing import Sequence
from sklearn import metrics
class MAE:
    """Mean-absolute-error rating metric."""

    def calc(self, true: Sequence[float], pred: Sequence[float]) -> float:
        """Return the MAE between ground-truth and predicted ratings."""
        score = metrics.mean_absolute_error(true, pred)
        return score
class MSE:
    """Mean-squared-error rating metric."""

    def calc(self, true: Sequence[float], pred: Sequence[float]) -> float:
        """Return the MSE between ground-truth and predicted ratings."""
        # squared=True (the default) requests the squared error, as
        # opposed to RMSE below.
        score = metrics.mean_squared_error(true, pred, squared=True)
        return score
class RMSE:
    """Root-mean-squared-error rating metric."""

    def calc(self, true: Sequence[float], pred: Sequence[float]) -> float:
        """Return the RMSE between ground-truth and predicted ratings."""
        # squared=False makes sklearn return the root of the MSE.
        score = metrics.mean_squared_error(true, pred, squared=False)
        return score
| 27.052632
| 74
| 0.708171
| 69
| 514
| 5.188406
| 0.318841
| 0.217877
| 0.092179
| 0.125698
| 0.712291
| 0.712291
| 0.712291
| 0.712291
| 0.712291
| 0.712291
| 0
| 0
| 0.178988
| 514
| 18
| 75
| 28.555556
| 0.848341
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.272727
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
5f539e89c04a3824f52ef7c5cad3d5ad2b6d7e86
| 243
|
py
|
Python
|
kiqpo/core/math.py
|
bionic-py/Bionic
|
a54c85107a6a2aa9a9563b6b3e1f9bb64d63faa4
|
[
"MIT"
] | 9
|
2021-10-31T03:38:16.000Z
|
2021-12-17T00:03:36.000Z
|
kiqpo/core/math.py
|
bionic-py/Bionic
|
a54c85107a6a2aa9a9563b6b3e1f9bb64d63faa4
|
[
"MIT"
] | 12
|
2021-11-11T14:18:09.000Z
|
2021-12-03T14:00:25.000Z
|
kiqpo/core/math.py
|
kiqpo/kiqpo
|
a54c85107a6a2aa9a9563b6b3e1f9bb64d63faa4
|
[
"MIT"
] | 3
|
2022-03-03T18:30:53.000Z
|
2022-03-09T13:29:39.000Z
|
def screen(int):
    """Return the value as a CSS percentage string, e.g. screen(50) -> "50%".

    NOTE(review): the parameter shadows the builtin ``int``; it is kept
    unrenamed so keyword callers are not broken.
    """
    # The original wrapped an already-str result in a second str() call;
    # a single conversion is equivalent.
    return str(int) + "%"
def vh(int):
    """Return the value as a CSS viewport-height string, e.g. vh(10) -> "10vh".

    NOTE(review): parameter shadows the builtin ``int``; kept for
    interface compatibility.
    """
    # Drop the redundant outer str() of an already-str value.
    return str(int) + "vh"
def vw(int):
    """Return the value as a CSS viewport-width string, e.g. vw(3) -> "3vw".

    NOTE(review): parameter shadows the builtin ``int``; kept for
    interface compatibility.
    """
    # Drop the redundant outer str() of an already-str value.
    return str(int) + "vw"
def px(int):
    """Return the value as a CSS pixel string, e.g. px(16) -> "16px".

    NOTE(review): parameter shadows the builtin ``int``; kept for
    interface compatibility.
    """
    # Drop the redundant outer str() of an already-str value.
    return str(int) + "px"
| 14.294118
| 25
| 0.55144
| 39
| 243
| 3.435897
| 0.230769
| 0.208955
| 0.298507
| 0.38806
| 0.776119
| 0.776119
| 0.776119
| 0
| 0
| 0
| 0
| 0
| 0.263374
| 243
| 17
| 26
| 14.294118
| 0.748603
| 0
| 0
| 0.333333
| 0
| 0
| 0.028689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
5fbfb1aa851f90e6908a628a91b36b6226f763b3
| 7,539
|
py
|
Python
|
soda/core/tests/data_source/test_schema_changes.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | null | null | null |
soda/core/tests/data_source/test_schema_changes.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | null | null | null |
soda/core/tests/data_source/test_schema_changes.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | null | null | null |
from soda.execution.check_outcome import CheckOutcome
from soda.execution.data_source import DataSource
from soda.execution.data_type import DataType
from tests.helpers.common_test_tables import customers_test_table
from tests.helpers.scanner import Scanner
def test_schema_changes_pass(scanner: Scanner):
    """A 'when schema changes: any' check passes when the mocked historic
    schema metric matches the current test-table schema exactly."""
    table_name = scanner.ensure_test_table(customers_test_table)
    data_source = scanner.data_source
    schema_metric_value_derived_from_test_table = derive_schema_metric_value_from_test_table(
        customers_test_table, data_source
    )
    scan = scanner.create_test_scan()
    # Seed the mocked history with the unchanged schema so the check sees
    # no difference between "previous" and "current".
    scan.mock_historic_values(
        metric_identity=f"metric-{scan._scan_definition_name}-{scanner.data_source.data_source_name}-{table_name}-schema",
        metric_values=[schema_metric_value_derived_from_test_table],
    )
    scan.add_sodacl_yaml_str(
        f"""
      checks for {table_name}:
        - schema:
            fail:
              when schema changes: any
    """
    )
    scan.execute()
    scan.assert_all_checks_pass()
def test_schema_changes_column_addition(scanner: Scanner):
    """Removing a column from the *historic* metric makes the current table
    look like it gained a column; check the full fail/warn matrix."""
    table_name = scanner.ensure_test_table(customers_test_table)
    data_source = scanner.data_source
    # start from the historic measurement value
    schema_metric_value_derived_from_test_table = derive_schema_metric_value_from_test_table(
        customers_test_table, data_source
    )
    # remove the 4th column from the historic metric value
    # this will result in schema check discovering a column being added
    schema_metric_value_derived_from_test_table.pop(3)
    scan = scanner.create_test_scan()
    scan.mock_historic_values(
        metric_identity=f"metric-{scan._scan_definition_name}-{scanner.data_source.data_source_name}-{table_name}-schema",
        metric_values=[schema_metric_value_derived_from_test_table],
    )
    # Checks 0-4 use fail conditions, 5-9 the same conditions as warn.
    scan.add_sodacl_yaml_str(
        f"""
      checks for {table_name}:
        - schema:
            fail:
              when schema changes:
                - column add
        - schema:
            fail:
              when schema changes:
                - column delete
        - schema:
            fail:
              when schema changes:
                - column type change
        - schema:
            fail:
              when schema changes:
                - column index change
        - schema:
            fail:
              when schema changes: any
        - schema:
            warn:
              when schema changes:
                - column add
        - schema:
            warn:
              when schema changes:
                - column delete
        - schema:
            warn:
              when schema changes:
                - column type change
        - schema:
            warn:
              when schema changes:
                - column index change
        - schema:
            warn:
              when schema changes: any
    """
    )
    scan.execute()
    # Outcome matrix: 'column add', 'column index change' and 'any'
    # trigger (FAIL for the fail-variants, WARN for the warn-variants);
    # 'column delete' and 'column type change' stay PASS.
    assert scan._checks[0].outcome == CheckOutcome.FAIL
    assert scan._checks[1].outcome == CheckOutcome.PASS
    assert scan._checks[2].outcome == CheckOutcome.PASS
    assert scan._checks[3].outcome == CheckOutcome.FAIL
    assert scan._checks[4].outcome == CheckOutcome.FAIL
    assert scan._checks[5].outcome == CheckOutcome.WARN
    assert scan._checks[6].outcome == CheckOutcome.PASS
    assert scan._checks[7].outcome == CheckOutcome.PASS
    assert scan._checks[8].outcome == CheckOutcome.WARN
    assert scan._checks[9].outcome == CheckOutcome.WARN
def test_schema_changes_column_deletion(scanner: Scanner):
    """Adding an extra column to the *historic* metric makes the current
    table look like it lost a column; check the full fail/warn matrix."""
    table_name = scanner.ensure_test_table(customers_test_table)
    data_source = scanner.data_source
    schema_metric_value_derived_from_test_table = derive_schema_metric_value_from_test_table(
        customers_test_table, data_source
    )
    # insert an extra 4th column into the historic metric value
    # (the original comment said "remove the 3rd column", which is wrong);
    # this makes the schema check discover a column being deleted
    schema_metric_value_derived_from_test_table.insert(
        3,
        {
            "name": "extra",
            "type": data_source.get_sql_type_for_schema_check(DataType.TEXT),
        },
    )
    scan = scanner.create_test_scan()
    scan.mock_historic_values(
        metric_identity=f"metric-{scan._scan_definition_name}-{scanner.data_source.data_source_name}-{table_name}-schema",
        metric_values=[schema_metric_value_derived_from_test_table],
    )
    # Checks 0-4 use fail conditions, 5-9 the same conditions as warn.
    scan.add_sodacl_yaml_str(
        f"""
      checks for {table_name}:
        - schema:
            fail:
              when schema changes:
                - column add
        - schema:
            fail:
              when schema changes:
                - column delete
        - schema:
            fail:
              when schema changes:
                - column type change
        - schema:
            fail:
              when schema changes:
                - column index change
        - schema:
            fail:
              when schema changes: any
        - schema:
            warn:
              when schema changes:
                - column add
        - schema:
            warn:
              when schema changes:
                - column delete
        - schema:
            warn:
              when schema changes:
                - column type change
        - schema:
            warn:
              when schema changes:
                - column index change
        - schema:
            warn:
              when schema changes: any
    """
    )
    scan.execute()
    # Outcome matrix: 'column delete', 'column index change' and 'any'
    # trigger; 'column add' and 'column type change' stay PASS.
    assert scan._checks[0].outcome == CheckOutcome.PASS
    assert scan._checks[1].outcome == CheckOutcome.FAIL
    assert scan._checks[2].outcome == CheckOutcome.PASS
    assert scan._checks[3].outcome == CheckOutcome.FAIL
    assert scan._checks[4].outcome == CheckOutcome.FAIL
    assert scan._checks[5].outcome == CheckOutcome.PASS
    assert scan._checks[6].outcome == CheckOutcome.WARN
    assert scan._checks[7].outcome == CheckOutcome.PASS
    assert scan._checks[8].outcome == CheckOutcome.WARN
    assert scan._checks[9].outcome == CheckOutcome.WARN
def test_schema_changes_warn_and_fail(scanner: Scanner):
    """One schema check with both a warn and a fail condition.

    NOTE(review): the asserted outcome is FAIL — presumably the matched
    fail condition takes precedence over the matched warn condition;
    confirm against the schema-check implementation.
    """
    table_name = scanner.ensure_test_table(customers_test_table)
    data_source = scanner.data_source
    # start from the historic measurement value
    schema_metric_value_derived_from_test_table = derive_schema_metric_value_from_test_table(
        customers_test_table, data_source
    )
    # remove the 4th column from the historic metric value
    # this will result in schema check discovering a column being added
    schema_metric_value_derived_from_test_table.pop(3)
    scan = scanner.create_test_scan()
    scan.mock_historic_values(
        metric_identity=f"metric-{scan._scan_definition_name}-{scanner.data_source.data_source_name}-{table_name}-schema",
        metric_values=[schema_metric_value_derived_from_test_table],
    )
    # warn on the column addition, fail on wrong/missing column types
    scan.add_sodacl_yaml_str(
        f"""
      checks for {table_name}:
        - schema:
            warn:
              when schema changes:
                - column add
            fail:
              when wrong column type:
                id: integer
                does_not_exist: integer
                pct: varchar
    """
    )
    scan.execute()
    assert scan._checks[0].outcome == CheckOutcome.FAIL
def derive_schema_metric_value_from_test_table(test_table, data_source: DataSource):
    """Build the schema metric value for *test_table*: a list of
    ``{"name": ..., "type": ...}`` dicts, one per column, with the name
    and SQL type formatted by *data_source*."""
    schema_value = []
    for column in test_table.columns:
        column_name, column_type = column[0], column[1]
        schema_value.append(
            {
                "name": data_source.format_column_default(column_name),
                "type": data_source.get_sql_type_for_schema_check(column_type),
            }
        )
    return schema_value
| 31.543933
| 122
| 0.631649
| 851
| 7,539
| 5.277321
| 0.117509
| 0.062124
| 0.083278
| 0.087063
| 0.888889
| 0.881764
| 0.834335
| 0.819639
| 0.810287
| 0.781118
| 0
| 0.005449
| 0.294071
| 7,539
| 238
| 123
| 31.676471
| 0.838407
| 0.049609
| 0
| 0.76
| 0
| 0
| 0.398072
| 0.052536
| 0
| 0
| 0
| 0
| 0.11
| 1
| 0.025
| false
| 0.05
| 0.025
| 0.005
| 0.055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
395c8f16482fd82232b5e41a57965e60cd30b368
| 125
|
py
|
Python
|
todo_or_die/__init__.py
|
sobolevn/py-todo-or-die
|
81fc01a7683b312144096c60d37a193dea8250ab
|
[
"MIT"
] | 67
|
2021-09-14T20:33:14.000Z
|
2021-11-24T20:37:44.000Z
|
todo_or_die/__init__.py
|
sobolevn/py-todo-or-die
|
81fc01a7683b312144096c60d37a193dea8250ab
|
[
"MIT"
] | 1
|
2021-09-27T07:48:56.000Z
|
2021-09-27T07:48:56.000Z
|
todo_or_die/__init__.py
|
sobolevn/py-todo-or-die
|
81fc01a7683b312144096c60d37a193dea8250ab
|
[
"MIT"
] | 2
|
2021-09-26T08:28:19.000Z
|
2021-10-21T11:15:03.000Z
|
from todo_or_die.main import todo_or_die
from todo_or_die.decorator import TodoOrDie
# Explicit public API of the todo_or_die package.
__all__ = ["todo_or_die", "TodoOrDie"]
| 25
| 43
| 0.816
| 21
| 125
| 4.285714
| 0.428571
| 0.266667
| 0.4
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104
| 125
| 4
| 44
| 31.25
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3999b59c278213643a57a01f73cf94b643dd8af2
| 192
|
py
|
Python
|
src/gcs_fake_pii_file_creator/__init__.py
|
mesmacosta/gcs-fake-pii-file-creator
|
6f89ffeefe4357734ba9ba13acae989ac2d355e3
|
[
"MIT"
] | 2
|
2020-02-29T07:04:34.000Z
|
2020-04-29T21:17:46.000Z
|
src/gcs_fake_pii_file_creator/__init__.py
|
mesmacosta/gcs-fake-pii-file-creator
|
6f89ffeefe4357734ba9ba13acae989ac2d355e3
|
[
"MIT"
] | null | null | null |
src/gcs_fake_pii_file_creator/__init__.py
|
mesmacosta/gcs-fake-pii-file-creator
|
6f89ffeefe4357734ba9ba13acae989ac2d355e3
|
[
"MIT"
] | null | null | null |
from .dataframe_creator import DFCreator
from .csv_creator import CSVCreator
from .gcs_fake_file_creator import GCSFakeFileCreator
from .gcs_fake_file_creator_cli import GCSFakeFileCreatorCLI
| 38.4
| 60
| 0.895833
| 25
| 192
| 6.52
| 0.52
| 0.239264
| 0.134969
| 0.184049
| 0.269939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 192
| 4
| 61
| 48
| 0.926136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
39cd4639172f1c83add293ee2fc18ea8ff791013
| 4,648
|
py
|
Python
|
autogl/data/graph/_general_static_graph/_abstract_views.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
autogl/data/graph/_general_static_graph/_abstract_views.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
autogl/data/graph/_general_static_graph/_abstract_views.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
import torch
import typing as _typing
from . import _canonical_edge_type
class SpecificTypedNodeDataView(_typing.MutableMapping[str, torch.Tensor]):
    """Abstract mutable mapping view over the data tensors of one node type.

    Keys are data-field names, values are tensors. All methods are
    abstract placeholders that concrete graph implementations override.
    """

    def __getitem__(self, data_key: str) -> torch.Tensor:
        raise NotImplementedError

    def __setitem__(self, data_key: str, value: torch.Tensor):
        raise NotImplementedError

    def __delitem__(self, data_key: str) -> None:
        raise NotImplementedError

    def __len__(self) -> int:
        raise NotImplementedError

    def __iter__(self) -> _typing.Iterator[str]:
        raise NotImplementedError
class SpecificTypedNodeView:
    """Abstract view of the nodes of one specific node type.

    Exposes the per-type node data as a ``SpecificTypedNodeDataView``;
    both accessors are abstract placeholders.
    """

    @property
    def data(self) -> SpecificTypedNodeDataView:
        raise NotImplementedError

    @data.setter
    def data(self, nodes_data: _typing.Mapping[str, torch.Tensor]):
        raise NotImplementedError
class HeterogeneousNodeView(_typing.Iterable[str]):
    """Abstract view over the node types of a heterogeneous graph.

    Iteration yields node-type names; indexing with a node type (or
    ``None``) yields a ``SpecificTypedNodeView``. All members are
    abstract placeholders for concrete implementations.
    """

    @property
    def data(self) -> SpecificTypedNodeDataView:
        raise NotImplementedError

    @data.setter
    def data(self, nodes_data: _typing.Mapping[str, torch.Tensor]):
        raise NotImplementedError

    def __getitem__(self, node_type: _typing.Optional[str]) -> SpecificTypedNodeView:
        raise NotImplementedError

    def __setitem__(
            self, node_t: _typing.Optional[str],
            nodes_data: _typing.Mapping[str, torch.Tensor]
    ):
        raise NotImplementedError

    def __delitem__(self, node_t: _typing.Optional[str]):
        raise NotImplementedError

    def __iter__(self) -> _typing.Iterator[str]:
        raise NotImplementedError

    @property
    def is_homogeneous(self) -> bool:
        # Semantics (single effective node type) defined by implementations.
        raise NotImplementedError
class HomogeneousEdgesDataView(_typing.MutableMapping[str, torch.Tensor]):
    """Abstract mutable mapping view over the data tensors of one edge type.

    Keys are data-field names, values are tensors. All methods are
    abstract placeholders that concrete graph implementations override.
    """

    def __getitem__(self, data_key: str) -> torch.Tensor:
        raise NotImplementedError

    def __setitem__(self, data_key: str, value: torch.Tensor):
        raise NotImplementedError

    def __delitem__(self, data_key: str):
        raise NotImplementedError

    def __len__(self) -> int:
        raise NotImplementedError

    def __iter__(self) -> _typing.Iterator[str]:
        raise NotImplementedError
class HomogeneousEdgesView:
    """Abstract view of the edges of a single edge type.

    Exposes the connectivity tensor and per-edge data; both accessors
    are abstract placeholders.
    """

    @property
    def connections(self) -> torch.LongTensor:
        # Edge-index tensor; exact layout defined by implementations.
        raise NotImplementedError

    @property
    def data(self) -> HomogeneousEdgesDataView:
        raise NotImplementedError
class HeterogeneousEdgesView(_typing.Collection[_canonical_edge_type.CanonicalEdgeType]):
    """Abstract collection view over the edge types of a heterogeneous graph.

    Iteration and containment operate on canonical edge types; indexing
    with an edge-type key (``None``, a string, a (src, relation, dst)
    tuple, or a ``CanonicalEdgeType``) yields a ``HomogeneousEdgesView``.
    All members are abstract placeholders for concrete implementations.
    """

    @property
    def connections(self) -> torch.LongTensor:
        raise NotImplementedError

    @property
    def data(self) -> HomogeneousEdgesDataView:
        raise NotImplementedError

    @property
    def is_homogeneous(self) -> bool:
        # Semantics (single effective edge type) defined by implementations.
        raise NotImplementedError

    def set(
            self, edge_t: _typing.Union[None, str, _typing.Tuple[str, str, str]],
            connections: torch.LongTensor, data: _typing.Optional[_typing.Mapping[str, torch.Tensor]] = ...
    ):
        # Replace connectivity (and optionally data) for one edge type.
        raise NotImplementedError

    def __getitem__(
            self,
            edge_t: _typing.Union[
                None, str, _typing.Tuple[str, str, str], _canonical_edge_type.CanonicalEdgeType
            ]
    ) -> HomogeneousEdgesView:
        raise NotImplementedError

    def __setitem__(
            self,
            edge_t: _typing.Union[
                None, str, _typing.Tuple[str, str, str], _canonical_edge_type.CanonicalEdgeType
            ],
            edges: _typing.Union[torch.LongTensor]
    ):
        raise NotImplementedError

    def __delitem__(
            self,
            edge_t: _typing.Union[
                None, str, _typing.Tuple[str, str, str], _canonical_edge_type.CanonicalEdgeType
            ]
    ):
        raise NotImplementedError

    def __len__(self) -> int:
        raise NotImplementedError

    def __iter__(self) -> _typing.Iterator[_canonical_edge_type.CanonicalEdgeType]:
        raise NotImplementedError

    def __contains__(
            self,
            edge_type: _typing.Union[
                str, _typing.Tuple[str, str, str], _canonical_edge_type.CanonicalEdgeType
            ]
    ) -> bool:
        raise NotImplementedError
class GraphDataView(_typing.MutableMapping[str, torch.Tensor]):
    """Abstract mutable mapping view over graph-level data tensors.

    Keys are data-field names, values are tensors. All methods are
    abstract placeholders that concrete graph implementations override.
    """

    def __setitem__(self, data_key: str, data: torch.Tensor) -> None:
        raise NotImplementedError

    def __delitem__(self, data_key: str) -> None:
        raise NotImplementedError

    def __getitem__(self, data_key: str) -> torch.Tensor:
        raise NotImplementedError

    def __len__(self) -> int:
        raise NotImplementedError

    def __iter__(self) -> _typing.Iterator[str]:
        raise NotImplementedError
| 28.515337
| 107
| 0.671472
| 445
| 4,648
| 6.624719
| 0.121348
| 0.29308
| 0.210651
| 0.042741
| 0.82327
| 0.78867
| 0.752714
| 0.72829
| 0.72829
| 0.72829
| 0
| 0
| 0.245482
| 4,648
| 162
| 108
| 28.691358
| 0.840605
| 0
| 0
| 0.731092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.302521
| false
| 0
| 0.02521
| 0
| 0.386555
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
39d173353dc84a0124f9ae2493124e124138a2d7
| 44
|
py
|
Python
|
src/fastpli/tools/__init__.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
src/fastpli/tools/__init__.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
src/fastpli/tools/__init__.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
from . import rotation
from . import helper
| 14.666667
| 22
| 0.772727
| 6
| 44
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 23
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f2e68509df688ca131696ef0b89b918ed4705c3d
| 249,490
|
py
|
Python
|
mysite/patterns/99.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 19
|
2016-06-17T23:36:27.000Z
|
2020-01-13T16:41:55.000Z
|
mysite/patterns/99.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 13
|
2016-06-06T12:57:05.000Z
|
2019-02-05T02:21:00.000Z
|
patterns/99.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | 7
|
2017-03-28T18:12:22.000Z
|
2021-06-16T09:32:59.000Z
|
pattern_zero=[0.0, 0.0099989797, 0.0197938986, 0.0202020202, 0.0293847567, 0.0302009999, 0.0387715539, 0.0399959188, 0.0404040404, 0.0479542904, 0.0495867769, 0.0504030201, 0.056932966, 0.0589735741, 0.060197939, 0.0606060606, 0.0657075809, 0.0681563106, 0.0697887971, 0.0706050403, 0.0742781349, 0.0771349862, 0.0791755943, 0.0803999592, 0.0808080808, 0.0826446281, 0.0859096011, 0.0883583308, 0.0899908173, 0.0908070605, 0.0944801551, 0.0973370064, 0.0987654321, 0.0993776145, 0.1006019794, 0.101010101, 0.1028466483, 0.1061116213, 0.1065197429, 0.108560351, 0.1101928375, 0.1110090807, 0.1140699929, 0.1146821753, 0.1175390266, 0.1189674523, 0.1195796347, 0.1208039996, 0.1212121212, 0.121416182, 0.1230486685, 0.1263136415, 0.1267217631, 0.1285583104, 0.1287623712, 0.1303948577, 0.1312111009, 0.1342720131, 0.1348841955, 0.1354963779, 0.1377410468, 0.1391694725, 0.1397816549, 0.1410060198, 0.1414141414, 0.1416182022, 0.1422303847, 0.1432506887, 0.1465156617, 0.1469237833, 0.1487603306, 0.1489643914, 0.1505968779, 0.1514131211, 0.1544740333, 0.1550862157, 0.1556983981, 0.157943067, 0.1593714927, 0.1599836751, 0.16120804, 0.1616161616, 0.1618202224, 0.1624324049, 0.1634527089, 0.1667176819, 0.1671258035, 0.1689623508, 0.1691664116, 0.1707988981, 0.1716151413, 0.1728395062, 0.1746760535, 0.1752882359, 0.1759004183, 0.1781450872, 0.178349148, 0.1795735129, 0.1801856953, 0.1814100602, 0.1818181818, 0.1820222426, 0.1826344251, 0.1836547291, 0.1869197021, 0.1873278237, 0.1887562494, 0.189164371, 0.1893684318, 0.1910009183, 0.1918171615, 0.1930415264, 0.1936537088, 0.1948780737, 0.1954902561, 0.1961024385, 0.1983471074, 0.1985511682, 0.1997755331, 0.2003877155, 0.2016120804, 0.202020202, 0.2022242628, 0.2028364453, 0.2038567493, 0.2071217223, 0.2075298439, 0.2089582696, 0.2093663912, 0.209570452, 0.2112029385, 0.2120191817, 0.2132435466, 0.213855729, 0.2150800939, 0.2156922763, 0.2163044587, 0.2185491276, 0.2187531885, 0.2199775533, 0.2205897357, 0.2218141006, 0.2222222222, 
0.222426283, 0.2230384655, 0.2240587695, 0.2254871952, 0.2273237425, 0.2277318641, 0.2285481073, 0.2291602898, 0.2295684114, 0.2297724722, 0.2314049587, 0.2322212019, 0.2334455668, 0.2340577492, 0.2352821141, 0.2358942965, 0.2365064789, 0.2387511478, 0.2389552087, 0.2401795735, 0.2407917559, 0.2420161208, 0.2424242424, 0.2426283032, 0.2432404857, 0.2442607897, 0.2456892154, 0.2469135802, 0.2475257627, 0.2479338843, 0.2487501275, 0.24936231, 0.2497704316, 0.2499744924, 0.2516069789, 0.2524232221, 0.253647587, 0.2542597694, 0.2554841343, 0.2560963167, 0.2567084991, 0.258953168, 0.2591572289, 0.2603815937, 0.2609937761, 0.262218141, 0.2626262626, 0.2628303234, 0.2634425059, 0.2644628099, 0.2658912356, 0.2671156004, 0.2677277829, 0.2681359045, 0.2689521477, 0.2695643302, 0.2699724518, 0.2701765126, 0.2718089991, 0.2726252423, 0.2738496072, 0.2744617896, 0.2756861545, 0.2762983369, 0.2769105193, 0.2791551882, 0.2793592491, 0.2805836139, 0.2811957963, 0.2824201612, 0.2828282828, 0.2830323436, 0.2836445261, 0.2846648301, 0.2860932558, 0.2873176207, 0.2879298031, 0.2883379247, 0.2891541679, 0.2897663504, 0.290174472, 0.2903785328, 0.2920110193, 0.2928272625, 0.2940516274, 0.2946638098, 0.2958881747, 0.2965003571, 0.2971125395, 0.2993572084, 0.2995612693, 0.3007856341, 0.3013978165, 0.3026221814, 0.303030303, 0.3032343638, 0.3038465463, 0.3048668503, 0.306295276, 0.3075196409, 0.3081318233, 0.3085399449, 0.3093561881, 0.3099683706, 0.3103764922, 0.310580553, 0.3122130395, 0.3130292827, 0.3142536476, 0.31486583, 0.3160901949, 0.3167023773, 0.3173145597, 0.3195592287, 0.3197632895, 0.3209876543, 0.3215998368, 0.3228242016, 0.3232323232, 0.323436384, 0.3240485665, 0.3250688705, 0.3264972962, 0.3277216611, 0.3283338435, 0.3287419651, 0.3295582083, 0.3301703908, 0.3305785124, 0.3307825732, 0.3324150597, 0.3332313029, 0.3344556678, 0.3350678502, 0.3362922151, 0.3369043975, 0.3375165799, 0.3397612489, 0.3399653097, 0.3411896745, 0.341801857, 0.3430262218, 0.3434343434, 
0.3436384042, 0.3442505867, 0.3452708907, 0.3466993164, 0.3479236813, 0.3485358637, 0.3489439853, 0.3497602285, 0.350372411, 0.3507805326, 0.3509845934, 0.3526170799, 0.3534333231, 0.354657688, 0.3552698704, 0.3564942353, 0.3571064177, 0.3577186001, 0.3599632691, 0.3601673299, 0.3613916947, 0.3620038772, 0.363228242, 0.3636363636, 0.3638404244, 0.3644526069, 0.3654729109, 0.3669013366, 0.3681257015, 0.3687378839, 0.3691460055, 0.3699622488, 0.3705744312, 0.3709825528, 0.3711866136, 0.3728191001, 0.3736353433, 0.3748597082, 0.3754718906, 0.3766962555, 0.3773084379, 0.3779206203, 0.3801652893, 0.3803693501, 0.3815937149, 0.3822058974, 0.3834302622, 0.3838383838, 0.3840424446, 0.3846546271, 0.3856749311, 0.3871033568, 0.3883277217, 0.3889399041, 0.3893480257, 0.390164269, 0.3907764514, 0.391184573, 0.3913886338, 0.3930211203, 0.3938373635, 0.3950617284, 0.3956739108, 0.3968982757, 0.3975104581, 0.3981226405, 0.4003673095, 0.4005713703, 0.4017957351, 0.4024079176, 0.4036322824, 0.404040404, 0.4042444649, 0.4048566473, 0.4058769513, 0.407305377, 0.4085297419, 0.4091419243, 0.4095500459, 0.4103662892, 0.4109784716, 0.4113865932, 0.411590654, 0.4132231405, 0.4140393837, 0.4152637486, 0.415875931, 0.4171002959, 0.4177124783, 0.4183246607, 0.4205693297, 0.4207733905, 0.4219977553, 0.4226099378, 0.4238343026, 0.4242424242, 0.4244464851, 0.4250586675, 0.4260789715, 0.4275073972, 0.4287317621, 0.4293439445, 0.4297520661, 0.4305683094, 0.4311804918, 0.4315886134, 0.4317926742, 0.4334251607, 0.4342414039, 0.4354657688, 0.4360779512, 0.4373023161, 0.4379144985, 0.438526681, 0.4407713499, 0.4409754107, 0.4421997755, 0.442811958, 0.4440363228, 0.4444444444, 0.4446485053, 0.4452606877, 0.4462809917, 0.4477094174, 0.4489337823, 0.4495459647, 0.4499540863, 0.4507703296, 0.451382512, 0.4517906336, 0.4519946944, 0.4536271809, 0.4544434241, 0.455667789, 0.4562799714, 0.4575043363, 0.4581165187, 0.4587287012, 0.4609733701, 0.4611774309, 0.4624017957, 0.4630139782, 0.464238343, 
0.4646464646, 0.4648505255, 0.4654627079, 0.4664830119, 0.4679114376, 0.4691358025, 0.4697479849, 0.4701561065, 0.4709723498, 0.4715845322, 0.4719926538, 0.4721967146, 0.4738292011, 0.4746454443, 0.4758698092, 0.4764819916, 0.4777063565, 0.4783185389, 0.4789307214, 0.4811753903, 0.4813794511, 0.4826038159, 0.4832159984, 0.4844403632, 0.4848484848, 0.4850525457, 0.4856647281, 0.4866850321, 0.4881134578, 0.4893378227, 0.4899500051, 0.4903581267, 0.49117437, 0.4917865524, 0.492194674, 0.4923987348, 0.4940312213, 0.4948474645, 0.4960718294, 0.4966840118, 0.4979083767, 0.4985205591, 0.4991327416, 0.5013774105, 0.5015814713, 0.5028058361, 0.5034180186, 0.5046423834, 0.5050505051, 0.5052545659, 0.5058667483, 0.5068870523, 0.508315478, 0.5095398429, 0.5101520253, 0.5105601469, 0.5113763902, 0.5119885726, 0.5123966942, 0.512600755, 0.5142332415, 0.5150494847, 0.5162738496, 0.516886032, 0.5181103969, 0.5187225793, 0.5193347618, 0.5215794307, 0.5217834915, 0.5230078563, 0.5236200388, 0.5248444036, 0.5252525253, 0.5254565861, 0.5260687685, 0.5270890725, 0.5285174982, 0.5297418631, 0.5303540455, 0.5307621671, 0.5315784104, 0.5321905928, 0.5325987144, 0.5328027752, 0.5344352617, 0.5352515049, 0.5364758698, 0.5370880522, 0.5383124171, 0.5389245995, 0.539536782, 0.5417814509, 0.5419855117, 0.5432098765, 0.543822059, 0.5450464238, 0.5454545455, 0.5456586063, 0.5462707887, 0.5472910927, 0.5487195184, 0.5499438833, 0.5505560657, 0.5509641873, 0.5517804306, 0.552392613, 0.5528007346, 0.5530047954, 0.5546372819, 0.5554535252, 0.55667789, 0.5572900724, 0.5585144373, 0.5591266197, 0.5597388022, 0.5619834711, 0.5621875319, 0.5634118967, 0.5640240792, 0.565248444, 0.5656565657, 0.5658606265, 0.5664728089, 0.5674931129, 0.5689215386, 0.5701459035, 0.5707580859, 0.5711662075, 0.5719824508, 0.5725946332, 0.5730027548, 0.5732068156, 0.5748393021, 0.5756555454, 0.5768799102, 0.5774920926, 0.5787164575, 0.5793286399, 0.5799408224, 0.5821854913, 0.5823895521, 0.5836139169, 0.5842260994, 
0.5854504642, 0.5858585859, 0.5860626467, 0.5866748291, 0.5876951332, 0.5891235588, 0.5903479237, 0.5909601061, 0.5913682277, 0.592184471, 0.5927966534, 0.593204775, 0.5934088358, 0.5950413223, 0.5958575656, 0.5970819304, 0.5976941128, 0.5989184777, 0.5995306601, 0.6001428426, 0.6023875115, 0.6025915723, 0.6038159371, 0.6044281196, 0.6056524844, 0.6060606061, 0.6062646669, 0.6068768493, 0.6078971534, 0.609325579, 0.6105499439, 0.6111621263, 0.6115702479, 0.6123864912, 0.6129986736, 0.6134067952, 0.613610856, 0.6152433425, 0.6160595858, 0.6172839506, 0.617896133, 0.6191204979, 0.6197326803, 0.6203448628, 0.6225895317, 0.6227935925, 0.6240179574, 0.6246301398, 0.6258545046, 0.6262626263, 0.6264666871, 0.6270788695, 0.6280991736, 0.6295275992, 0.6307519641, 0.6313641465, 0.6317722681, 0.6325885114, 0.6332006938, 0.6336088154, 0.6338128762, 0.6354453627, 0.636261606, 0.6374859708, 0.6380981533, 0.6393225181, 0.6399347005, 0.640546883, 0.6427915519, 0.6429956127, 0.6442199776, 0.64483216, 0.6460565248, 0.6464646465, 0.6466687073, 0.6472808897, 0.6483011938, 0.6497296194, 0.6509539843, 0.6515661667, 0.6519742883, 0.6527905316, 0.653402714, 0.6538108356, 0.6540148964, 0.6556473829, 0.6564636262, 0.657687991, 0.6583001735, 0.6595245383, 0.6601367207, 0.6607489032, 0.6629935721, 0.6631976329, 0.6644219978, 0.6650341802, 0.666258545, 0.6666666667, 0.6668707275, 0.6674829099, 0.668503214, 0.6699316396, 0.6711560045, 0.6717681869, 0.6721763085, 0.6729925518, 0.6736047342, 0.6740128558, 0.6742169166, 0.6758494031, 0.6766656464, 0.6778900112, 0.6785021937, 0.6797265585, 0.6803387409, 0.6809509234, 0.6831955923, 0.6833996531, 0.684624018, 0.6852362004, 0.6864605652, 0.6868686869, 0.6870727477, 0.6876849301, 0.6887052342, 0.6901336598, 0.6913580247, 0.6919702071, 0.6923783287, 0.693194572, 0.6938067544, 0.694214876, 0.6944189368, 0.6960514233, 0.6968676666, 0.6980920314, 0.6987042139, 0.6999285787, 0.7005407611, 0.7011529436, 0.7033976125, 0.7036016733, 0.7048260382, 0.7054382206, 
0.7066625855, 0.7070707071, 0.7072747679, 0.7078869503, 0.7089072544, 0.71033568, 0.7115600449, 0.7121722273, 0.7125803489, 0.7133965922, 0.7140087746, 0.7144168962, 0.714620957, 0.7162534435, 0.7170696868, 0.7182940516, 0.7189062341, 0.7201305989, 0.7207427813, 0.7213549638, 0.7235996327, 0.7238036935, 0.7250280584, 0.7256402408, 0.7268646057, 0.7272727273, 0.7274767881, 0.7280889705, 0.7291092746, 0.7305377002, 0.7317620651, 0.7323742475, 0.7327823691, 0.7335986124, 0.7342107948, 0.7346189164, 0.7348229772, 0.7364554637, 0.737271707, 0.7384960718, 0.7391082543, 0.7403326191, 0.7409448016, 0.741556984, 0.7438016529, 0.7440057137, 0.7452300786, 0.745842261, 0.7470666259, 0.7474747475, 0.7476788083, 0.7482909907, 0.7493112948, 0.7507397204, 0.7519640853, 0.7525762677, 0.7529843893, 0.7538006326, 0.754412815, 0.7548209366, 0.7550249974, 0.7566574839, 0.7574737272, 0.758698092, 0.7593102745, 0.7605346393, 0.7611468218, 0.7617590042, 0.7640036731, 0.7642077339, 0.7654320988, 0.7660442812, 0.7672686461, 0.7676767677, 0.7678808285, 0.7684930109, 0.769513315, 0.7709417406, 0.7721661055, 0.7727782879, 0.7731864096, 0.7740026528, 0.7746148352, 0.7750229568, 0.7752270177, 0.7768595041, 0.7776757474, 0.7789001122, 0.7795122947, 0.7807366595, 0.781348842, 0.7819610244, 0.7842056933, 0.7844097541, 0.785634119, 0.7862463014, 0.7874706663, 0.7878787879, 0.7880828487, 0.7886950311, 0.7897153352, 0.7911437608, 0.7923681257, 0.7929803081, 0.7933884298, 0.794204673, 0.7948168554, 0.795224977, 0.7954290379, 0.7970615243, 0.7978777676, 0.7991021324, 0.7997143149, 0.8009386797, 0.8015508622, 0.8021630446, 0.8044077135, 0.8046117743, 0.8058361392, 0.8064483216, 0.8076726865, 0.8080808081, 0.8082848689, 0.8088970513, 0.8099173554, 0.811345781, 0.8125701459, 0.8131823283, 0.81359045, 0.8144066932, 0.8150188756, 0.8154269972, 0.8156310581, 0.8172635445, 0.8180797878, 0.8193041526, 0.8199163351, 0.8211406999, 0.8217528824, 0.8223650648, 0.8246097337, 0.8248137945, 0.8260381594, 0.8266503418, 
0.8278747067, 0.8282828283, 0.8284868891, 0.8290990715, 0.8301193756, 0.8315478012, 0.8327721661, 0.8333843485, 0.8337924702, 0.8346087134, 0.8352208958, 0.8356290174, 0.8358330783, 0.8374655647, 0.838281808, 0.8395061728, 0.8401183553, 0.8413427201, 0.8419549026, 0.842567085, 0.8448117539, 0.8450158147, 0.8462401796, 0.846852362, 0.8480767269, 0.8484848485, 0.8486889093, 0.8493010917, 0.8503213958, 0.8517498214, 0.8529741863, 0.8535863687, 0.8539944904, 0.8548107336, 0.855422916, 0.8558310376, 0.8560350985, 0.8576675849, 0.8584838282, 0.859708193, 0.8603203755, 0.8615447403, 0.8621569228, 0.8627691052, 0.8650137741, 0.8652178349, 0.8664421998, 0.8670543822, 0.8682787471, 0.8686868687, 0.8688909295, 0.8695031119, 0.870523416, 0.8719518416, 0.8731762065, 0.8737883889, 0.8741965106, 0.8750127538, 0.8756249362, 0.8760330579, 0.8762371187, 0.8778696051, 0.8786858484, 0.8799102132, 0.8805223957, 0.8817467605, 0.882358943, 0.8829711254, 0.8852157943, 0.8854198551, 0.88664422, 0.8872564024, 0.8884807673, 0.8888888889, 0.8890929497, 0.8897051321, 0.8907254362, 0.8921538619, 0.8933782267, 0.8939904091, 0.8943985308, 0.895214774, 0.8958269564, 0.8962350781, 0.8964391389, 0.8980716253, 0.8988878686, 0.9001122334, 0.9007244159, 0.9019487807, 0.9025609632, 0.9031731456, 0.9054178145, 0.9056218753, 0.9068462402, 0.9074584226, 0.9086827875, 0.9090909091, 0.9092949699, 0.9099071523, 0.9109274564, 0.9123558821, 0.9135802469, 0.9141924293, 0.914600551, 0.9154167942, 0.9160289766, 0.9164370983, 0.9166411591, 0.9182736455, 0.9190898888, 0.9203142536, 0.9209264361, 0.9221508009, 0.9227629834, 0.9233751658, 0.9256198347, 0.9258238955, 0.9270482604, 0.9276604428, 0.9288848077, 0.9292929293, 0.9294969901, 0.9301091725, 0.9311294766, 0.9325579023, 0.9337822671, 0.9343944495, 0.9348025712, 0.9356188144, 0.9362309968, 0.9366391185, 0.9368431793, 0.9384756657, 0.939291909, 0.9405162739, 0.9411284563, 0.9423528211, 0.9429650036, 0.943577186, 0.9458218549, 0.9460259157, 0.9472502806, 
0.947862463, 0.9490868279, 0.9494949495, 0.9496990103, 0.9503111927, 0.9513314968, 0.9527599225, 0.9539842873, 0.9545964697, 0.9550045914, 0.9558208346, 0.956433017, 0.9568411387, 0.9570451995, 0.958677686, 0.9594939292, 0.9607182941, 0.9613304765, 0.9625548413, 0.9631670238, 0.9637792062, 0.9660238751, 0.9662279359, 0.9674523008, 0.9680644832, 0.9692888481, 0.9696969697, 0.9699010305, 0.9705132129, 0.971533517, 0.9729619427, 0.9741863075, 0.97479849, 0.9752066116, 0.9760228548, 0.9766350372, 0.9770431589, 0.9772472197, 0.9788797062, 0.9796959494, 0.9809203143, 0.9815324967, 0.9827568615, 0.983369044, 0.9839812264, 0.9862258953, 0.9864299561, 0.987654321, 0.9882665034, 0.9894908683, 0.9898989899, 0.9901030507, 0.9907152331, 0.9917355372, 0.9931639629, 0.9943883277, 0.9950005102, 0.9954086318, 0.996224875, 0.9968370574, 0.9972451791, 0.9974492399, 0.9990817264, 0.9998979696]
pattern_odd=[0.001122334, 0.001734517, 0.002958882, 0.003571064, 0.004183247, 0.006427916, 0.006631976, 0.007856341, 0.008468524, 0.009692888, 0.01010101, 0.010305071, 0.010917253, 0.011937557, 0.013365983, 0.014590348, 0.01520253, 0.015610652, 0.016426895, 0.017039078, 0.017447199, 0.01765126, 0.019283747, 0.02009999, 0.021324355, 0.021936537, 0.023160902, 0.023773084, 0.024385267, 0.026629936, 0.026833997, 0.028058361, 0.028670544, 0.029894909, 0.03030303, 0.030507091, 0.031119274, 0.032139578, 0.033568003, 0.034792368, 0.035404551, 0.035812672, 0.036628915, 0.037241098, 0.037649219, 0.03785328, 0.039485767, 0.04030201, 0.041526375, 0.042138557, 0.043362922, 0.043975105, 0.044587287, 0.046831956, 0.047036017, 0.048260382, 0.048872564, 0.050096929, 0.050505051, 0.050709111, 0.051321294, 0.052341598, 0.053770023, 0.054994388, 0.055606571, 0.056014692, 0.056830936, 0.057443118, 0.05785124, 0.0580553, 0.059687787, 0.06050403, 0.061728395, 0.062340577, 0.063564942, 0.064177125, 0.064789307, 0.067033976, 0.067238037, 0.068462402, 0.069074584, 0.070298949, 0.070707071, 0.070911132, 0.071523314, 0.072543618, 0.073972044, 0.075196409, 0.075808591, 0.076216713, 0.077032956, 0.077645138, 0.07805326, 0.078257321, 0.079889807, 0.08070605, 0.081930415, 0.082542598, 0.083766963, 0.084379145, 0.084991327, 0.087235996, 0.087440057, 0.088664422, 0.089276604, 0.090500969, 0.090909091, 0.091113152, 0.091725334, 0.092745638, 0.094174064, 0.095398429, 0.096010611, 0.096418733, 0.097234976, 0.097847158, 0.09825528, 0.098459341, 0.100091827, 0.100908071, 0.102132435, 0.102744618, 0.103968983, 0.104581165, 0.105193348, 0.107438017, 0.107642077, 0.108866442, 0.109478625, 0.110702989, 0.111111111, 0.111315172, 0.111927354, 0.112947658, 0.114376084, 0.115600449, 0.116212631, 0.116620753, 0.117436996, 0.118049179, 0.1184573, 0.118661361, 0.120293848, 0.121110091, 0.122334456, 0.122946638, 0.124171003, 0.124783185, 0.125395368, 0.127640037, 0.127844098, 0.129068462, 0.129680645, 0.13090501, 
0.131313131, 0.131517192, 0.132129375, 0.133149679, 0.134578104, 0.135802469, 0.136414652, 0.136822773, 0.137639016, 0.138251199, 0.13865932, 0.138863381, 0.140495868, 0.141312111, 0.142536476, 0.143148658, 0.144373023, 0.144985206, 0.145597388, 0.147842057, 0.148046118, 0.149270483, 0.149882665, 0.15110703, 0.151515152, 0.151719212, 0.152331395, 0.153351699, 0.154780124, 0.156004489, 0.156616672, 0.157024793, 0.157841037, 0.158453219, 0.158861341, 0.159065401, 0.160697888, 0.161514131, 0.162738496, 0.163350679, 0.164575043, 0.165187226, 0.165799408, 0.168044077, 0.168248138, 0.169472503, 0.170084685, 0.17130905, 0.171717172, 0.171921233, 0.172533415, 0.173553719, 0.174982145, 0.17620651, 0.176818692, 0.177226814, 0.178043057, 0.178655239, 0.179063361, 0.179267422, 0.180899908, 0.181716151, 0.182940516, 0.183552699, 0.184777064, 0.185389246, 0.186001428, 0.188246097, 0.188450158, 0.189674523, 0.190286705, 0.19151107, 0.191919192, 0.192123253, 0.192735435, 0.193755739, 0.195184165, 0.19640853, 0.197020712, 0.197428834, 0.198245077, 0.198857259, 0.199265381, 0.199469442, 0.201101928, 0.201918172, 0.203142536, 0.203754719, 0.204979084, 0.205591266, 0.206203449, 0.208448118, 0.208652178, 0.209876543, 0.210488726, 0.211713091, 0.212121212, 0.212325273, 0.212937455, 0.213957759, 0.215386185, 0.21661055, 0.217222732, 0.217630854, 0.218447097, 0.21905928, 0.219467401, 0.219671462, 0.221303949, 0.222120192, 0.223344557, 0.223956739, 0.225181104, 0.225793286, 0.226405469, 0.228650138, 0.228854199, 0.230078563, 0.230690746, 0.231915111, 0.232323232, 0.232527293, 0.233139476, 0.23415978, 0.235588205, 0.23681257, 0.237424753, 0.237832874, 0.238649117, 0.2392613, 0.239669421, 0.239873482, 0.241505969, 0.242322212, 0.243546577, 0.244158759, 0.245383124, 0.245995307, 0.246607489, 0.248852158, 0.249056219, 0.250280584, 0.250892766, 0.252117131, 0.252525253, 0.252729313, 0.253341496, 0.2543618, 0.255790225, 0.25701459, 0.257626773, 0.258034894, 0.258851138, 0.25946332, 0.259871442, 
0.260075503, 0.261707989, 0.262524232, 0.263748597, 0.26436078, 0.265585144, 0.266197327, 0.266809509, 0.269054178, 0.269258239, 0.270482604, 0.271094786, 0.272319151, 0.272727273, 0.272931334, 0.273543516, 0.27456382, 0.275992246, 0.277216611, 0.277828793, 0.278236915, 0.279053158, 0.27966534, 0.280073462, 0.280277523, 0.281910009, 0.282726252, 0.283950617, 0.2845628, 0.285787165, 0.286399347, 0.287011529, 0.289256198, 0.289460259, 0.290684624, 0.291296806, 0.292521171, 0.292929293, 0.293133354, 0.293745536, 0.29476584, 0.296194266, 0.297418631, 0.298030813, 0.298438935, 0.299255178, 0.29986736, 0.300275482, 0.300479543, 0.302112029, 0.302928273, 0.304152637, 0.30476482, 0.305989185, 0.306601367, 0.30721355, 0.309458219, 0.309662279, 0.310886644, 0.311498827, 0.312723192, 0.313131313, 0.313335374, 0.313947556, 0.31496786, 0.316396286, 0.317620651, 0.318232833, 0.318640955, 0.319457198, 0.320069381, 0.320477502, 0.320681563, 0.32231405, 0.323130293, 0.324354658, 0.32496684, 0.326191205, 0.326803387, 0.32741557, 0.329660239, 0.3298643, 0.331088664, 0.331700847, 0.332925212, 0.333333333, 0.333537394, 0.334149577, 0.335169881, 0.336598306, 0.337822671, 0.338434854, 0.338842975, 0.339659218, 0.340271401, 0.340679522, 0.340883583, 0.34251607, 0.343332313, 0.344556678, 0.34516886, 0.346393225, 0.347005408, 0.34761759, 0.349862259, 0.35006632, 0.351290685, 0.351902867, 0.353127232, 0.353535354, 0.353739414, 0.354351597, 0.355371901, 0.356800326, 0.358024691, 0.358636874, 0.359044995, 0.359861239, 0.360473421, 0.360881543, 0.361085604, 0.36271809, 0.363534333, 0.364758698, 0.365370881, 0.366595245, 0.367207428, 0.36781961, 0.370064279, 0.37026834, 0.371492705, 0.372104887, 0.373329252, 0.373737374, 0.373941435, 0.374553617, 0.375573921, 0.377002347, 0.378226712, 0.378838894, 0.379247016, 0.380063259, 0.380675441, 0.381083563, 0.381287624, 0.38292011, 0.383736353, 0.384960718, 0.385572901, 0.386797266, 0.387409448, 0.38802163, 0.390266299, 0.39047036, 0.391694725, 
0.392306907, 0.393531272, 0.393939394, 0.394143455, 0.394755637, 0.395775941, 0.397204367, 0.398428732, 0.399040914, 0.399449036, 0.400265279, 0.400877461, 0.401285583, 0.401489644, 0.40312213, 0.403938374, 0.405162738, 0.405774921, 0.406999286, 0.407611468, 0.408223651, 0.41046832, 0.41067238, 0.411896745, 0.412508928, 0.413733293, 0.414141414, 0.414345475, 0.414957657, 0.415977961, 0.417406387, 0.418630752, 0.419242934, 0.419651056, 0.420467299, 0.421079482, 0.421487603, 0.421691664, 0.423324151, 0.424140394, 0.425364759, 0.425976941, 0.427201306, 0.427813488, 0.428425671, 0.43067034, 0.430874401, 0.432098765, 0.432710948, 0.433935313, 0.434343434, 0.434547495, 0.435159678, 0.436179982, 0.437608407, 0.438832772, 0.439444955, 0.439853076, 0.440669319, 0.441281502, 0.441689624, 0.441893684, 0.443526171, 0.444342414, 0.445566779, 0.446178961, 0.447403326, 0.448015509, 0.448627691, 0.45087236, 0.451076421, 0.452300786, 0.452912968, 0.454137333, 0.454545455, 0.454749515, 0.455361698, 0.456382002, 0.457810428, 0.459034792, 0.459646975, 0.460055096, 0.46087134, 0.461483522, 0.461891644, 0.462095705, 0.463728191, 0.464544434, 0.465768799, 0.466380982, 0.467605346, 0.468217529, 0.468829711, 0.47107438, 0.471278441, 0.472502806, 0.473114988, 0.474339353, 0.474747475, 0.474951536, 0.475563718, 0.476584022, 0.478012448, 0.479236813, 0.479848995, 0.480257117, 0.48107336, 0.481685542, 0.482093664, 0.482297725, 0.483930211, 0.484746454, 0.485970819, 0.486583002, 0.487807367, 0.488419549, 0.489031731, 0.4912764, 0.491480461, 0.492704826, 0.493317008, 0.494541373, 0.494949495, 0.495153556, 0.495765738, 0.496786042, 0.498214468, 0.499438833, 0.500051015, 0.500459137, 0.50127538, 0.501887562, 0.502295684, 0.502499745, 0.504132231, 0.504948475, 0.50617284, 0.506785022, 0.508009387, 0.508621569, 0.509233752, 0.511478421, 0.511682481, 0.512906846, 0.513519029, 0.514743394, 0.515151515, 0.515355576, 0.515967758, 0.516988062, 0.518416488, 0.519640853, 0.520253035, 0.520661157, 
0.5214774, 0.522089583, 0.522497704, 0.522701765, 0.524334252, 0.525150495, 0.52637486, 0.526987042, 0.528211407, 0.528823589, 0.529435772, 0.531680441, 0.531884502, 0.533108866, 0.533721049, 0.534945414, 0.535353535, 0.535557596, 0.536169779, 0.537190083, 0.538618508, 0.539842873, 0.540455056, 0.540863177, 0.54167942, 0.542291603, 0.542699725, 0.542903785, 0.544536272, 0.545352515, 0.54657688, 0.547189062, 0.548413427, 0.54902561, 0.549637792, 0.551882461, 0.552086522, 0.553310887, 0.553923069, 0.555147434, 0.555555556, 0.555759616, 0.556371799, 0.557392103, 0.558820529, 0.560044893, 0.560657076, 0.561065197, 0.561881441, 0.562493623, 0.562901745, 0.563105806, 0.564738292, 0.565554535, 0.5667789, 0.567391083, 0.568615447, 0.56922763, 0.569839812, 0.572084481, 0.572288542, 0.573512907, 0.574125089, 0.575349454, 0.575757576, 0.575961637, 0.576573819, 0.577594123, 0.579022549, 0.580246914, 0.580859096, 0.581267218, 0.582083461, 0.582695643, 0.583103765, 0.583307826, 0.584940312, 0.585756555, 0.58698092, 0.587593103, 0.588817468, 0.58942965, 0.590041832, 0.592286501, 0.592490562, 0.593714927, 0.594327109, 0.595551474, 0.595959596, 0.596163657, 0.596775839, 0.597796143, 0.599224569, 0.600448934, 0.601061116, 0.601469238, 0.602285481, 0.602897664, 0.603305785, 0.603509846, 0.605142332, 0.605958576, 0.607182941, 0.607795123, 0.609019488, 0.60963167, 0.610243853, 0.612488522, 0.612692582, 0.613916947, 0.61452913, 0.615753495, 0.616161616, 0.616365677, 0.616977859, 0.617998163, 0.619426589, 0.620650954, 0.621263136, 0.621671258, 0.622487501, 0.623099684, 0.623507805, 0.623711866, 0.625344353, 0.626160596, 0.627384961, 0.627997143, 0.629221508, 0.62983369, 0.630445873, 0.632690542, 0.632894603, 0.634118967, 0.63473115, 0.635955515, 0.636363636, 0.636567697, 0.63717988, 0.638200184, 0.639628609, 0.640852974, 0.641465157, 0.641873278, 0.642689521, 0.643301704, 0.643709826, 0.643913886, 0.645546373, 0.646362616, 0.647586981, 0.648199163, 0.649423528, 0.650035711, 0.650647893, 
0.652892562, 0.653096623, 0.654320988, 0.65493317, 0.656157535, 0.656565657, 0.656769717, 0.6573819, 0.658402204, 0.65983063, 0.661054994, 0.661667177, 0.662075298, 0.662891542, 0.663503724, 0.663911846, 0.664115907, 0.665748393, 0.666564636, 0.667789001, 0.668401184, 0.669625548, 0.670237731, 0.670849913, 0.673094582, 0.673298643, 0.674523008, 0.67513519, 0.676359555, 0.676767677, 0.676971738, 0.67758392, 0.678604224, 0.68003265, 0.681257015, 0.681869197, 0.682277319, 0.683093562, 0.683705744, 0.684113866, 0.684317927, 0.685950413, 0.686766656, 0.687991021, 0.688603204, 0.689827569, 0.690439751, 0.691051933, 0.693296602, 0.693500663, 0.694725028, 0.69533721, 0.696561575, 0.696969697, 0.697173758, 0.69778594, 0.698806244, 0.70023467, 0.701459035, 0.702071217, 0.702479339, 0.703295582, 0.703907765, 0.704315886, 0.704519947, 0.706152433, 0.706968677, 0.708193042, 0.708805224, 0.710029589, 0.710641771, 0.711253954, 0.713498623, 0.713702683, 0.714927048, 0.715539231, 0.716763596, 0.717171717, 0.717375778, 0.71798796, 0.719008264, 0.72043669, 0.721661055, 0.722273237, 0.722681359, 0.723497602, 0.724109785, 0.724517906, 0.724721967, 0.726354454, 0.727170697, 0.728395062, 0.729007244, 0.730231609, 0.730843791, 0.731455974, 0.733700643, 0.733904704, 0.735129068, 0.735741251, 0.736965616, 0.737373737, 0.737577798, 0.738189981, 0.739210285, 0.74063871, 0.741863075, 0.742475258, 0.742883379, 0.743699622, 0.744311805, 0.744719927, 0.744923987, 0.746556474, 0.747372717, 0.748597082, 0.749209264, 0.750433629, 0.751045812, 0.751657994, 0.753902663, 0.754106724, 0.755331089, 0.755943271, 0.757167636, 0.757575758, 0.757779818, 0.758392001, 0.759412305, 0.760840731, 0.762065095, 0.762677278, 0.763085399, 0.763901643, 0.764513825, 0.764921947, 0.765126008, 0.766758494, 0.767574737, 0.768799102, 0.769411285, 0.770635649, 0.771247832, 0.771860014, 0.774104683, 0.774308744, 0.775533109, 0.776145291, 0.777369656, 0.777777778, 0.777981839, 0.778594021, 0.779614325, 0.781042751, 
0.782267116, 0.782879298, 0.78328742, 0.784103663, 0.784715845, 0.785123967, 0.785328028, 0.786960514, 0.787776757, 0.789001122, 0.789613305, 0.79083767, 0.791449852, 0.792062034, 0.794306703, 0.794510764, 0.795735129, 0.796347311, 0.797571676, 0.797979798, 0.798183859, 0.798796041, 0.799816345, 0.801244771, 0.802469136, 0.803081318, 0.80348944, 0.804305683, 0.804917866, 0.805325987, 0.805530048, 0.807162534, 0.807978778, 0.809203143, 0.809815325, 0.81103969, 0.811651872, 0.812264055, 0.814508724, 0.814712784, 0.815937149, 0.816549332, 0.817773697, 0.818181818, 0.818385879, 0.818998061, 0.820018365, 0.821446791, 0.822671156, 0.823283338, 0.82369146, 0.824507703, 0.825119886, 0.825528007, 0.825732068, 0.827364555, 0.828180798, 0.829405163, 0.830017345, 0.83124171, 0.831853892, 0.832466075, 0.834710744, 0.834914805, 0.836139169, 0.836751352, 0.837975717, 0.838383838, 0.838587899, 0.839200082, 0.840220386, 0.841648811, 0.842873176, 0.843485359, 0.84389348, 0.844709723, 0.845321906, 0.845730028, 0.845934088, 0.847566575, 0.848382818, 0.849607183, 0.850219365, 0.85144373, 0.852055913, 0.852668095, 0.854912764, 0.855116825, 0.85634119, 0.856953372, 0.858177737, 0.858585859, 0.858789919, 0.859402102, 0.860422406, 0.861850832, 0.863075196, 0.863687379, 0.8640955, 0.864911744, 0.865523926, 0.865932048, 0.866136109, 0.867768595, 0.868584838, 0.869809203, 0.870421386, 0.87164575, 0.872257933, 0.872870115, 0.875114784, 0.875318845, 0.87654321, 0.877155392, 0.878379757, 0.878787879, 0.87899194, 0.879604122, 0.880624426, 0.882052852, 0.883277217, 0.883889399, 0.884297521, 0.885113764, 0.885725946, 0.886134068, 0.886338129, 0.887970615, 0.888786858, 0.890011223, 0.890623406, 0.891847771, 0.892459953, 0.893072135, 0.895316804, 0.895520865, 0.89674523, 0.897357413, 0.898581777, 0.898989899, 0.89919396, 0.899806142, 0.900826446, 0.902254872, 0.903479237, 0.904091419, 0.904499541, 0.905315784, 0.905927967, 0.906336088, 0.906540149, 0.908172635, 0.908988879, 0.910213244, 0.910825426, 
0.912049791, 0.912661973, 0.913274156, 0.915518825, 0.915722885, 0.91694725, 0.917559433, 0.918783798, 0.919191919, 0.91939598, 0.920008162, 0.921028466, 0.922456892, 0.923681257, 0.924293439, 0.924701561, 0.925517804, 0.926129987, 0.926538108, 0.926742169, 0.928374656, 0.929190899, 0.930415264, 0.931027446, 0.932251811, 0.932863993, 0.933476176, 0.935720845, 0.935924906, 0.93714927, 0.937761453, 0.938985818, 0.939393939, 0.939598, 0.940210183, 0.941230487, 0.942658912, 0.943883277, 0.94449546, 0.944903581, 0.945719825, 0.946332007, 0.946740129, 0.946944189, 0.948576676, 0.949392919, 0.950617284, 0.951229466, 0.952453831, 0.953066014, 0.953678196, 0.955922865, 0.956126926, 0.957351291, 0.957963473, 0.959187838, 0.95959596, 0.95980002, 0.960412203, 0.961432507, 0.962860933, 0.964085297, 0.96469748, 0.965105601, 0.965921845, 0.966534027, 0.966942149, 0.96714621, 0.968778696, 0.969594939, 0.970819304, 0.971431487, 0.972655851, 0.973268034, 0.973880216, 0.976124885, 0.976328946, 0.977553311, 0.978165493, 0.979389858, 0.97979798, 0.980002041, 0.980614223, 0.981634527, 0.983062953, 0.984287318, 0.9848995, 0.985307622, 0.986123865, 0.986736047, 0.987144169, 0.98734823, 0.988980716, 0.989796959, 0.991021324, 0.991633507, 0.992857872, 0.993470054, 0.994082237, 0.996326905, 0.996530966, 0.997755331, 0.998367514, 0.999591878]
pattern_even=[0.0, 0.0002040608, 0.0008162432, 0.0018365473, 0.003264973, 0.0044893378, 0.0051015203, 0.0055096419, 0.0063258851, 0.0069380675, 0.0073461892, 0.00755025, 0.0091827365, 0.0099989797, 0.0112233446, 0.011835527, 0.0130598918, 0.0136720743, 0.0142842567, 0.0165289256, 0.0167329864, 0.0179573513, 0.0185695337, 0.0197938986, 0.0202020202, 0.020406081, 0.0210182634, 0.0220385675, 0.0234669932, 0.024691358, 0.0253035405, 0.0257116621, 0.0265279053, 0.0271400877, 0.0275482094, 0.0277522702, 0.0293847567, 0.0302009999, 0.0314253648, 0.0320375472, 0.0332619121, 0.0338740945, 0.0344862769, 0.0367309458, 0.0369350066, 0.0381593715, 0.0387715539, 0.0399959188, 0.0404040404, 0.0406081012, 0.0412202836, 0.0422405877, 0.0436690134, 0.0448933782, 0.0455055607, 0.0459136823, 0.0467299255, 0.0473421079, 0.0477502296, 0.0479542904, 0.0495867769, 0.0504030201, 0.051627385, 0.0522395674, 0.0534639323, 0.0540761147, 0.0546882971, 0.056932966, 0.0571370268, 0.0583613917, 0.0589735741, 0.060197939, 0.0606060606, 0.0608101214, 0.0614223038, 0.0624426079, 0.0638710336, 0.0650953984, 0.0657075809, 0.0661157025, 0.0669319457, 0.0675441282, 0.0679522498, 0.0681563106, 0.0697887971, 0.0706050403, 0.0718294052, 0.0724415876, 0.0736659525, 0.0742781349, 0.0748903173, 0.0771349862, 0.077339047, 0.0785634119, 0.0791755943, 0.0803999592, 0.0808080808, 0.0810121416, 0.081624324, 0.0826446281, 0.0840730538, 0.0852974186, 0.0859096011, 0.0863177227, 0.0871339659, 0.0877461484, 0.08815427, 0.0883583308, 0.0899908173, 0.0908070605, 0.0920314254, 0.0926436078, 0.0938679727, 0.0944801551, 0.0950923375, 0.0973370064, 0.0975410672, 0.0987654321, 0.0993776145, 0.1006019794, 0.101010101, 0.1012141618, 0.1018263443, 0.1028466483, 0.104275074, 0.1054994388, 0.1061116213, 0.1065197429, 0.1073359861, 0.1079481686, 0.1083562902, 0.108560351, 0.1101928375, 0.1110090807, 0.1122334456, 0.112845628, 0.1140699929, 0.1146821753, 0.1152943577, 0.1175390266, 0.1177430874, 0.1189674523, 0.1195796347, 
0.1208039996, 0.1212121212, 0.121416182, 0.1220283645, 0.1230486685, 0.1244770942, 0.125701459, 0.1263136415, 0.1267217631, 0.1275380063, 0.1281501888, 0.1285583104, 0.1287623712, 0.1303948577, 0.1312111009, 0.1324354658, 0.1330476482, 0.1342720131, 0.1348841955, 0.1354963779, 0.1377410468, 0.1379451076, 0.1391694725, 0.1397816549, 0.1410060198, 0.1414141414, 0.1416182022, 0.1422303847, 0.1432506887, 0.1446791144, 0.1459034792, 0.1465156617, 0.1469237833, 0.1477400265, 0.148352209, 0.1487603306, 0.1489643914, 0.1505968779, 0.1514131211, 0.152637486, 0.1532496684, 0.1544740333, 0.1550862157, 0.1556983981, 0.157943067, 0.1581471278, 0.1593714927, 0.1599836751, 0.16120804, 0.1616161616, 0.1618202224, 0.1624324049, 0.1634527089, 0.1648811346, 0.1661054994, 0.1667176819, 0.1671258035, 0.1679420467, 0.1685542292, 0.1689623508, 0.1691664116, 0.1707988981, 0.1716151413, 0.1728395062, 0.1734516886, 0.1746760535, 0.1752882359, 0.1759004183, 0.1781450872, 0.178349148, 0.1795735129, 0.1801856953, 0.1814100602, 0.1818181818, 0.1820222426, 0.1826344251, 0.1836547291, 0.1850831548, 0.1863075196, 0.1869197021, 0.1873278237, 0.1881440669, 0.1887562494, 0.189164371, 0.1893684318, 0.1910009183, 0.1918171615, 0.1930415264, 0.1936537088, 0.1948780737, 0.1954902561, 0.1961024385, 0.1983471074, 0.1985511682, 0.1997755331, 0.2003877155, 0.2016120804, 0.202020202, 0.2022242628, 0.2028364453, 0.2038567493, 0.205285175, 0.2065095398, 0.2071217223, 0.2075298439, 0.2083460871, 0.2089582696, 0.2093663912, 0.209570452, 0.2112029385, 0.2120191817, 0.2132435466, 0.213855729, 0.2150800939, 0.2156922763, 0.2163044587, 0.2185491276, 0.2187531885, 0.2199775533, 0.2205897357, 0.2218141006, 0.2222222222, 0.222426283, 0.2230384655, 0.2240587695, 0.2254871952, 0.22671156, 0.2273237425, 0.2277318641, 0.2285481073, 0.2291602898, 0.2295684114, 0.2297724722, 0.2314049587, 0.2322212019, 0.2334455668, 0.2340577492, 0.2352821141, 0.2358942965, 0.2365064789, 0.2387511478, 0.2389552087, 0.2401795735, 0.2407917559, 
0.2420161208, 0.2424242424, 0.2426283032, 0.2432404857, 0.2442607897, 0.2456892154, 0.2469135802, 0.2475257627, 0.2479338843, 0.2487501275, 0.24936231, 0.2497704316, 0.2499744924, 0.2516069789, 0.2524232221, 0.253647587, 0.2542597694, 0.2554841343, 0.2560963167, 0.2567084991, 0.258953168, 0.2591572289, 0.2603815937, 0.2609937761, 0.262218141, 0.2626262626, 0.2628303234, 0.2634425059, 0.2644628099, 0.2658912356, 0.2671156004, 0.2677277829, 0.2681359045, 0.2689521477, 0.2695643302, 0.2699724518, 0.2701765126, 0.2718089991, 0.2726252423, 0.2738496072, 0.2744617896, 0.2756861545, 0.2762983369, 0.2769105193, 0.2791551882, 0.2793592491, 0.2805836139, 0.2811957963, 0.2824201612, 0.2828282828, 0.2830323436, 0.2836445261, 0.2846648301, 0.2860932558, 0.2873176207, 0.2879298031, 0.2883379247, 0.2891541679, 0.2897663504, 0.290174472, 0.2903785328, 0.2920110193, 0.2928272625, 0.2940516274, 0.2946638098, 0.2958881747, 0.2965003571, 0.2971125395, 0.2993572084, 0.2995612693, 0.3007856341, 0.3013978165, 0.3026221814, 0.303030303, 0.3032343638, 0.3038465463, 0.3048668503, 0.306295276, 0.3075196409, 0.3081318233, 0.3085399449, 0.3093561881, 0.3099683706, 0.3103764922, 0.310580553, 0.3122130395, 0.3130292827, 0.3142536476, 0.31486583, 0.3160901949, 0.3167023773, 0.3173145597, 0.3195592287, 0.3197632895, 0.3209876543, 0.3215998368, 0.3228242016, 0.3232323232, 0.323436384, 0.3240485665, 0.3250688705, 0.3264972962, 0.3277216611, 0.3283338435, 0.3287419651, 0.3295582083, 0.3301703908, 0.3305785124, 0.3307825732, 0.3324150597, 0.3332313029, 0.3344556678, 0.3350678502, 0.3362922151, 0.3369043975, 0.3375165799, 0.3397612489, 0.3399653097, 0.3411896745, 0.341801857, 0.3430262218, 0.3434343434, 0.3436384042, 0.3442505867, 0.3452708907, 0.3466993164, 0.3479236813, 0.3485358637, 0.3489439853, 0.3497602285, 0.350372411, 0.3507805326, 0.3509845934, 0.3526170799, 0.3534333231, 0.354657688, 0.3552698704, 0.3564942353, 0.3571064177, 0.3577186001, 0.3599632691, 0.3601673299, 0.3613916947, 
0.3620038772, 0.363228242, 0.3636363636, 0.3638404244, 0.3644526069, 0.3654729109, 0.3669013366, 0.3681257015, 0.3687378839, 0.3691460055, 0.3699622488, 0.3705744312, 0.3709825528, 0.3711866136, 0.3728191001, 0.3736353433, 0.3748597082, 0.3754718906, 0.3766962555, 0.3773084379, 0.3779206203, 0.3801652893, 0.3803693501, 0.3815937149, 0.3822058974, 0.3834302622, 0.3838383838, 0.3840424446, 0.3846546271, 0.3856749311, 0.3871033568, 0.3883277217, 0.3889399041, 0.3893480257, 0.390164269, 0.3907764514, 0.391184573, 0.3913886338, 0.3930211203, 0.3938373635, 0.3950617284, 0.3956739108, 0.3968982757, 0.3975104581, 0.3981226405, 0.4003673095, 0.4005713703, 0.4017957351, 0.4024079176, 0.4036322824, 0.404040404, 0.4042444649, 0.4048566473, 0.4058769513, 0.407305377, 0.4085297419, 0.4091419243, 0.4095500459, 0.4103662892, 0.4109784716, 0.4113865932, 0.411590654, 0.4132231405, 0.4140393837, 0.4152637486, 0.415875931, 0.4171002959, 0.4177124783, 0.4183246607, 0.4205693297, 0.4207733905, 0.4219977553, 0.4226099378, 0.4238343026, 0.4242424242, 0.4244464851, 0.4250586675, 0.4260789715, 0.4275073972, 0.4287317621, 0.4293439445, 0.4297520661, 0.4305683094, 0.4311804918, 0.4315886134, 0.4317926742, 0.4334251607, 0.4342414039, 0.4354657688, 0.4360779512, 0.4373023161, 0.4379144985, 0.438526681, 0.4407713499, 0.4409754107, 0.4421997755, 0.442811958, 0.4440363228, 0.4444444444, 0.4446485053, 0.4452606877, 0.4462809917, 0.4477094174, 0.4489337823, 0.4495459647, 0.4499540863, 0.4507703296, 0.451382512, 0.4517906336, 0.4519946944, 0.4536271809, 0.4544434241, 0.455667789, 0.4562799714, 0.4575043363, 0.4581165187, 0.4587287012, 0.4609733701, 0.4611774309, 0.4624017957, 0.4630139782, 0.464238343, 0.4646464646, 0.4648505255, 0.4654627079, 0.4664830119, 0.4679114376, 0.4691358025, 0.4697479849, 0.4701561065, 0.4709723498, 0.4715845322, 0.4719926538, 0.4721967146, 0.4738292011, 0.4746454443, 0.4758698092, 0.4764819916, 0.4777063565, 0.4783185389, 0.4789307214, 0.4811753903, 0.4813794511, 
0.4826038159, 0.4832159984, 0.4844403632, 0.4848484848, 0.4850525457, 0.4856647281, 0.4866850321, 0.4881134578, 0.4893378227, 0.4899500051, 0.4903581267, 0.49117437, 0.4917865524, 0.492194674, 0.4923987348, 0.4940312213, 0.4948474645, 0.4960718294, 0.4966840118, 0.4979083767, 0.4985205591, 0.4991327416, 0.5013774105, 0.5015814713, 0.5028058361, 0.5034180186, 0.5046423834, 0.5050505051, 0.5052545659, 0.5058667483, 0.5068870523, 0.508315478, 0.5095398429, 0.5101520253, 0.5105601469, 0.5113763902, 0.5119885726, 0.5123966942, 0.512600755, 0.5142332415, 0.5150494847, 0.5162738496, 0.516886032, 0.5181103969, 0.5187225793, 0.5193347618, 0.5215794307, 0.5217834915, 0.5230078563, 0.5236200388, 0.5248444036, 0.5252525253, 0.5254565861, 0.5260687685, 0.5270890725, 0.5285174982, 0.5297418631, 0.5303540455, 0.5307621671, 0.5315784104, 0.5321905928, 0.5325987144, 0.5328027752, 0.5344352617, 0.5352515049, 0.5364758698, 0.5370880522, 0.5383124171, 0.5389245995, 0.539536782, 0.5417814509, 0.5419855117, 0.5432098765, 0.543822059, 0.5450464238, 0.5454545455, 0.5456586063, 0.5462707887, 0.5472910927, 0.5487195184, 0.5499438833, 0.5505560657, 0.5509641873, 0.5517804306, 0.552392613, 0.5528007346, 0.5530047954, 0.5546372819, 0.5554535252, 0.55667789, 0.5572900724, 0.5585144373, 0.5591266197, 0.5597388022, 0.5619834711, 0.5621875319, 0.5634118967, 0.5640240792, 0.565248444, 0.5656565657, 0.5658606265, 0.5664728089, 0.5674931129, 0.5689215386, 0.5701459035, 0.5707580859, 0.5711662075, 0.5719824508, 0.5725946332, 0.5730027548, 0.5732068156, 0.5748393021, 0.5756555454, 0.5768799102, 0.5774920926, 0.5787164575, 0.5793286399, 0.5799408224, 0.5821854913, 0.5823895521, 0.5836139169, 0.5842260994, 0.5854504642, 0.5858585859, 0.5860626467, 0.5866748291, 0.5876951332, 0.5891235588, 0.5903479237, 0.5909601061, 0.5913682277, 0.592184471, 0.5927966534, 0.593204775, 0.5934088358, 0.5950413223, 0.5958575656, 0.5970819304, 0.5976941128, 0.5989184777, 0.5995306601, 0.6001428426, 0.6023875115, 
0.6025915723, 0.6038159371, 0.6044281196, 0.6056524844, 0.6060606061, 0.6062646669, 0.6068768493, 0.6078971534, 0.609325579, 0.6105499439, 0.6111621263, 0.6115702479, 0.6123864912, 0.6129986736, 0.6134067952, 0.613610856, 0.6152433425, 0.6160595858, 0.6172839506, 0.617896133, 0.6191204979, 0.6197326803, 0.6203448628, 0.6225895317, 0.6227935925, 0.6240179574, 0.6246301398, 0.6258545046, 0.6262626263, 0.6264666871, 0.6270788695, 0.6280991736, 0.6295275992, 0.6307519641, 0.6313641465, 0.6317722681, 0.6325885114, 0.6332006938, 0.6336088154, 0.6338128762, 0.6354453627, 0.636261606, 0.6374859708, 0.6380981533, 0.6393225181, 0.6399347005, 0.640546883, 0.6427915519, 0.6429956127, 0.6442199776, 0.64483216, 0.6460565248, 0.6464646465, 0.6466687073, 0.6472808897, 0.6483011938, 0.6497296194, 0.6509539843, 0.6515661667, 0.6519742883, 0.6527905316, 0.653402714, 0.6538108356, 0.6540148964, 0.6556473829, 0.6564636262, 0.657687991, 0.6583001735, 0.6595245383, 0.6601367207, 0.6607489032, 0.6629935721, 0.6631976329, 0.6644219978, 0.6650341802, 0.666258545, 0.6666666667, 0.6668707275, 0.6674829099, 0.668503214, 0.6699316396, 0.6711560045, 0.6717681869, 0.6721763085, 0.6729925518, 0.6736047342, 0.6740128558, 0.6742169166, 0.6758494031, 0.6766656464, 0.6778900112, 0.6785021937, 0.6797265585, 0.6803387409, 0.6809509234, 0.6831955923, 0.6833996531, 0.684624018, 0.6852362004, 0.6864605652, 0.6868686869, 0.6870727477, 0.6876849301, 0.6887052342, 0.6901336598, 0.6913580247, 0.6919702071, 0.6923783287, 0.693194572, 0.6938067544, 0.694214876, 0.6944189368, 0.6960514233, 0.6968676666, 0.6980920314, 0.6987042139, 0.6999285787, 0.7005407611, 0.7011529436, 0.7033976125, 0.7036016733, 0.7048260382, 0.7054382206, 0.7066625855, 0.7070707071, 0.7072747679, 0.7078869503, 0.7089072544, 0.71033568, 0.7115600449, 0.7121722273, 0.7125803489, 0.7133965922, 0.7140087746, 0.7144168962, 0.714620957, 0.7162534435, 0.7170696868, 0.7182940516, 0.7189062341, 0.7201305989, 0.7207427813, 0.7213549638, 0.7235996327, 
0.7238036935, 0.7250280584, 0.7256402408, 0.7268646057, 0.7272727273, 0.7274767881, 0.7280889705, 0.7291092746, 0.7305377002, 0.7317620651, 0.7323742475, 0.7327823691, 0.7335986124, 0.7342107948, 0.7346189164, 0.7348229772, 0.7364554637, 0.737271707, 0.7384960718, 0.7391082543, 0.7403326191, 0.7409448016, 0.741556984, 0.7438016529, 0.7440057137, 0.7452300786, 0.745842261, 0.7470666259, 0.7474747475, 0.7476788083, 0.7482909907, 0.7493112948, 0.7507397204, 0.7519640853, 0.7525762677, 0.7529843893, 0.7538006326, 0.754412815, 0.7548209366, 0.7550249974, 0.7566574839, 0.7574737272, 0.758698092, 0.7593102745, 0.7605346393, 0.7611468218, 0.7617590042, 0.7640036731, 0.7642077339, 0.7654320988, 0.7660442812, 0.7672686461, 0.7676767677, 0.7678808285, 0.7684930109, 0.769513315, 0.7709417406, 0.7721661055, 0.7727782879, 0.7731864096, 0.7740026528, 0.7746148352, 0.7750229568, 0.7752270177, 0.7768595041, 0.7776757474, 0.7789001122, 0.7795122947, 0.7807366595, 0.781348842, 0.7819610244, 0.7842056933, 0.7844097541, 0.785634119, 0.7862463014, 0.7874706663, 0.7878787879, 0.7880828487, 0.7886950311, 0.7897153352, 0.7911437608, 0.7923681257, 0.7929803081, 0.7933884298, 0.794204673, 0.7948168554, 0.795224977, 0.7954290379, 0.7970615243, 0.7978777676, 0.7991021324, 0.7997143149, 0.8009386797, 0.8015508622, 0.8021630446, 0.8044077135, 0.8046117743, 0.8058361392, 0.8064483216, 0.8076726865, 0.8080808081, 0.8082848689, 0.8088970513, 0.8099173554, 0.811345781, 0.8125701459, 0.8131823283, 0.81359045, 0.8144066932, 0.8150188756, 0.8154269972, 0.8156310581, 0.8172635445, 0.8180797878, 0.8193041526, 0.8199163351, 0.8211406999, 0.8217528824, 0.8223650648, 0.8246097337, 0.8248137945, 0.8260381594, 0.8266503418, 0.8278747067, 0.8282828283, 0.8284868891, 0.8290990715, 0.8301193756, 0.8315478012, 0.8327721661, 0.8333843485, 0.8337924702, 0.8346087134, 0.8352208958, 0.8356290174, 0.8358330783, 0.8374655647, 0.838281808, 0.8395061728, 0.8401183553, 0.8413427201, 0.8419549026, 0.842567085, 
0.8448117539, 0.8450158147, 0.8462401796, 0.846852362, 0.8480767269, 0.8484848485, 0.8486889093, 0.8493010917, 0.8503213958, 0.8517498214, 0.8529741863, 0.8535863687, 0.8539944904, 0.8548107336, 0.855422916, 0.8558310376, 0.8560350985, 0.8576675849, 0.8584838282, 0.859708193, 0.8603203755, 0.8615447403, 0.8621569228, 0.8627691052, 0.8650137741, 0.8652178349, 0.8664421998, 0.8670543822, 0.8682787471, 0.8686868687, 0.8688909295, 0.8695031119, 0.870523416, 0.8719518416, 0.8731762065, 0.8737883889, 0.8741965106, 0.8750127538, 0.8756249362, 0.8760330579, 0.8762371187, 0.8778696051, 0.8786858484, 0.8799102132, 0.8805223957, 0.8817467605, 0.882358943, 0.8829711254, 0.8852157943, 0.8854198551, 0.88664422, 0.8872564024, 0.8884807673, 0.8888888889, 0.8890929497, 0.8897051321, 0.8907254362, 0.8921538619, 0.8933782267, 0.8939904091, 0.8943985308, 0.895214774, 0.8958269564, 0.8962350781, 0.8964391389, 0.8980716253, 0.8988878686, 0.9001122334, 0.9007244159, 0.9019487807, 0.9025609632, 0.9031731456, 0.9054178145, 0.9056218753, 0.9068462402, 0.9074584226, 0.9086827875, 0.9090909091, 0.9092949699, 0.9099071523, 0.9109274564, 0.9123558821, 0.9135802469, 0.9141924293, 0.914600551, 0.9154167942, 0.9160289766, 0.9164370983, 0.9166411591, 0.9182736455, 0.9190898888, 0.9203142536, 0.9209264361, 0.9221508009, 0.9227629834, 0.9233751658, 0.9256198347, 0.9258238955, 0.9270482604, 0.9276604428, 0.9288848077, 0.9292929293, 0.9294969901, 0.9301091725, 0.9311294766, 0.9325579023, 0.9337822671, 0.9343944495, 0.9348025712, 0.9356188144, 0.9362309968, 0.9366391185, 0.9368431793, 0.9384756657, 0.939291909, 0.9405162739, 0.9411284563, 0.9423528211, 0.9429650036, 0.943577186, 0.9458218549, 0.9460259157, 0.9472502806, 0.947862463, 0.9490868279, 0.9494949495, 0.9496990103, 0.9503111927, 0.9513314968, 0.9527599225, 0.9539842873, 0.9545964697, 0.9550045914, 0.9558208346, 0.956433017, 0.9568411387, 0.9570451995, 0.958677686, 0.9594939292, 0.9607182941, 0.9613304765, 0.9625548413, 0.9631670238, 
0.9637792062, 0.9660238751, 0.9662279359, 0.9674523008, 0.9680644832, 0.9692888481, 0.9696969697, 0.9699010305, 0.9705132129, 0.971533517, 0.9729619427, 0.9741863075, 0.97479849, 0.9752066116, 0.9760228548, 0.9766350372, 0.9770431589, 0.9772472197, 0.9788797062, 0.9796959494, 0.9809203143, 0.9815324967, 0.9827568615, 0.983369044, 0.9839812264, 0.9862258953, 0.9864299561, 0.987654321, 0.9882665034, 0.9894908683, 0.9898989899, 0.9901030507, 0.9907152331, 0.9917355372, 0.9931639629, 0.9943883277, 0.9950005102, 0.9954086318, 0.996224875, 0.9968370574, 0.9972451791, 0.9974492399, 0.9990817264, 0.9998979696]
averages_even={0.0: [0.0, 0.6666666666667, 0.3333333333333], 0.306295276: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.2738496072: [0.2222222222222, 0.7777777777778], 0.5858585859: [0.0, 0.3333333333333, 0.6666666666667], 0.7789001122: [0.2222222222222, 0.7777777777778], 0.121416182: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4856647281: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.4207733905: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.3883277217: [0.4444444444444, 0.5555555555556], 0.323436384: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.8741965106: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.0367309458: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1469237833: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.003264973: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.4379144985: [0.0808080808081, 0.1919191919192, 0.8080808080808, 0.9191919191919], 0.9788797062: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9460259157: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.4407713499: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1101928375: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.7842056933: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.3434343434: [0.0, 0.6666666666667, 0.3333333333333], 0.1893684318: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.7250280584: [0.8888888888889, 0.1111111111111], 0.011835527: [0.2626262626263, 
0.7373737373737, 0.6262626262626, 0.3737373737374], 0.6023875115: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.5303540455: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.0197938986: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.4903581267: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5389245995: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.9864299561: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.5248444036: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.3930211203: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8890929497: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.9366391185: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.7593102745: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.640546883: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.6295275992: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.4848484848: [0.0, 0.3333333333333, 0.6666666666667], 0.1330476482: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.2689521477: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.2824201612: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.8125701459: [0.4444444444444, 0.5555555555556], 0.9882665034: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.9558208346: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.2389552087: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.8374655647: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 
0.2065095398: [0.4444444444444, 0.5555555555556], 0.7493112948: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.415875931: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.6246301398: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3834302622: [0.020202020202, 0.979797979798, 0.7979797979798, 0.2020202020202], 0.0877461484: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.2860932558: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.253647587: [0.2222222222222, 0.7777777777778], 0.7189062341: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.2475257627: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.7125803489: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.1244770942: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.4654627079: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.4005713703: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.3681257015: [0.4444444444444, 0.5555555555556], 0.0920314254: [0.2222222222222, 0.7777777777778], 0.3032343638: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.5472910927: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.391184573: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8962350781: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4826038159: [0.1111111111111, 0.8888888888889], 0.6650341802: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3526170799: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9384756657: [0.030303030303, 
0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.794204673: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4205693297: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.7391082543: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.7438016529: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1616161616: [0.0, 0.6666666666667, 0.3333333333333], 0.4048566473: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.6197326803: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.9190898888: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.9343944495: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.1175390266: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.983369044: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.5876951332: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.3728191001: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.6987042139: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1716151413: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4646464646: [0.0, 0.3333333333333, 0.6666666666667], 0.8695031119: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.1391694725: [0.8888888888889, 0.1111111111111], 0.8682787471: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.7721661055: [0.4444444444444, 0.5555555555556], 0.8652178349: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.6717681869: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.9154167942: [0.4646464646465, 
0.5353535353535, 0.3535353535354, 0.6464646464646], 0.8829711254: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.785634119: [0.8888888888889, 0.1111111111111], 0.1801856953: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3956739108: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.0369350066: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.1477400265: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3307825732: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.7403326191: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.2658912356: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.1836547291: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8484848485: [0.0, 0.3333333333333, 0.6666666666667], 0.8082848689: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4777063565: [0.1313131313131, 0.6868686868687, 0.8686868686869, 0.3131313131313], 0.4452606877: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.0950923375: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.3803693501: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.3479236813: [0.4444444444444, 0.5555555555556], 0.0320375472: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.1267217631: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.7036016733: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.9680644832: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.4948474645: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4624017957: [0.1111111111111, 0.8888888888889], 0.8217528824: [0.0808080808081, 0.1919191919192, 0.9191919191919, 
0.8080808080808], 0.0993776145: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3975104581: [0.0808080808081, 0.1919191919192, 0.8080808080808, 0.9191919191919], 0.8980716253: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.4003673095: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.0167329864: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.7033976125: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.9182736455: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.303030303: [0.0, 0.6666666666667, 0.3333333333333], 0.152637486: [0.2222222222222, 0.7777777777778], 0.9972451791: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4499540863: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6595245383: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.0220385675: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.6460565248: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.4219977553: [0.1111111111111, 0.8888888888889], 0.5162738496: [0.2222222222222, 0.7777777777778], 0.5730027548: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.0624426079: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.6868686869: [0.0, 0.3333333333333, 0.6666666666667], 0.7470666259: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.5297418631: [0.4444444444444, 0.5555555555556], 0.9074584226: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.0546882971: [0.1616161616162, 
0.3838383838384, 0.6161616161616, 0.8383838383838], 0.2187531885: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.7776757474: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.1863075196: [0.4444444444444, 0.5555555555556], 0.6803387409: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.0938679727: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.3430262218: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.0234669932: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.0504030201: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.9164370983: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.9741863075: [0.4444444444444, 0.5555555555556], 0.2273237425: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.4899500051: [0.9292929292929, 0.7070707070707, 0.2929292929293, 0.0707070707071], 0.4575043363: [0.1313131313131, 0.6868686868687, 0.8686868686869, 0.3131313131313], 0.4250586675: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.0406081012: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.9256198347: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.3601673299: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.3277216611: [0.4444444444444, 0.5555555555556], 0.3599632691: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.97479849: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.2628303234: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.0657075809: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.6766656464: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4630139782: 
[0.040404040404, 0.5959595959596, 0.4040404040404, 0.959595959596], 0.0589735741: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.4746454443: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4789307214: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.4421997755: [0.1111111111111, 0.8888888888889], 0.7364554637: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3773084379: [0.0808080808081, 0.1919191919192, 0.8080808080808, 0.9191919191919], 0.8627691052: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.3801652893: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.6629935721: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.3411896745: [0.1111111111111, 0.8888888888889], 0.2828282828: [0.0, 0.6666666666667, 0.3333333333333], 0.4917865524: [0.5252525252525, 0.4747474747475, 0.2525252525253, 0.7474747474747], 0.7746148352: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.9568411387: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4297520661: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8180797878: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0185695337: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.0540761147: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.3324150597: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.5674931129: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.1514131211: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.5732068156: [0.949494949495, 
0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.508315478: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.64483216: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.958677686: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9968370574: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.6123864912: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4832159984: [0.040404040404, 0.5959595959596, 0.4040404040404, 0.959595959596], 0.8346087134: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.8021630446: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.737271707: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.7048260382: [0.8888888888889, 0.1111111111111], 0.1599836751: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.0099989797: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.1275380063: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.2903785328: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.3489439853: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.9662279359: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.2334455668: [0.2222222222222, 0.7777777777778], 0.8688909295: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4373023161: [0.1313131313131, 0.6868686868687, 0.8686868686869, 0.3131313131313], 0.1012141618: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4113865932: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.3399653097: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.3075196409: [0.4444444444444, 0.5555555555556], 
0.3173145597: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.5909601061: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.2420161208: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.9356188144: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.6944189368: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.4544434241: [0.1010101010101, 0.8989898989899, 0.989898989899, 0.010101010101], 0.1054994388: [0.4444444444444, 0.5555555555556], 0.3571064177: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.0899908173: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.6225895317: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.2626262626: [0.0, 0.6666666666667, 0.3333333333333], 0.5252525253: [0.0, 0.3333333333333, 0.6666666666667], 0.9166411591: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.5656565657: [0.0, 0.3333333333333, 0.6666666666667], 0.4715845322: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.666258545: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.5321905928: [0.5252525252525, 0.4747474747475, 0.2525252525253, 0.7474747474747], 0.4095500459: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8278747067: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.8737883889: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.3122130395: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.5270890725: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5976941128: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 
0.565248444: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.5328027752: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.2163044587: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.9660238751: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8088970513: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.2295684114: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.592184471: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.7235996327: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8266503418: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.1985511682: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.7617590042: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.0165289256: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1661054994: [0.4444444444444, 0.5555555555556], 0.5995306601: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.9827568615: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.2701765126: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.0675441282: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.3287419651: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.9907152331: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.3856749311: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9258238955: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.6466687073: [0.1414141414141, 
0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8284868891: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.2071217223: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.104275074: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.3846546271: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.5689215386: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.3197632895: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.0718294052: [0.2222222222222, 0.7777777777778], 0.9109274564: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9839812264: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.5989184777: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.9233751658: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.895214774: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.2156922763: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.108560351: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.4017957351: [0.1111111111111, 0.8888888888889], 0.3369043975: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.9539842873: [0.4444444444444, 0.5555555555556], 0.8260381594: [0.8888888888889, 0.1111111111111], 0.3397612489: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.5821854913: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8044077135: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.9550045914: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6472808897: [0.1717171717172, 0.7171717171717, 
0.8282828282828, 0.2828282828283], 0.112845628: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.9809203143: [0.2222222222222, 0.7777777777778], 0.8760330579: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.5823895521: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.0973370064: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.3893480257: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.543822059: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.2920110193: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.5572900724: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1312111009: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.3142536476: [0.2222222222222, 0.7777777777778], 0.3048668503: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2897663504: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.7978777676: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6831955923: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1707988981: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.7538006326: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3509845934: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6564636262: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6240179574: [0.8888888888889, 0.1111111111111], 0.1397816549: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.7213549638: [0.1616161616162, 0.8383838383838, 
0.6161616161616, 0.3838383838384], 0.7182940516: [0.2222222222222, 0.7777777777778], 0.2442607897: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9770431589: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.2456892154: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.8539944904: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8854198551: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.2132435466: [0.2222222222222, 0.7777777777778], 0.7880828487: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.7440057137: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.6258545046: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.148352209: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.5285174982: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.0748903173: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.2671156004: [0.4444444444444, 0.5555555555556], 0.855422916: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.6203448628: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.2218141006: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.8548107336: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.0473421079: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.4813794511: [0.3232323232323, 0.2323232323232, 0.6767676767677, 0.7676767676768], 0.4140393837: [0.1010101010101, 0.8989898989899, 0.989898989899, 0.010101010101], 0.3815937149: [0.1111111111111, 0.8888888888889], 0.0791755943: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3167023773: [0.0808080808081, 0.8080808080808, 0.1919191919192, 
0.9191919191919], 0.3195592287: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.5417814509: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8211406999: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.4960718294: [0.2222222222222, 0.7777777777778], 0.4311804918: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.492194674: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.7272727273: [0.0, 0.3333333333333, 0.6666666666667], 0.3013978165: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.0051015203: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2718089991: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.0679522498: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4979083767: [0.1313131313131, 0.6868686868687, 0.8686868686869, 0.3131313131313], 0.4171002959: [0.1313131313131, 0.6868686868687, 0.8686868686869, 0.3131313131313], 0.9348025712: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5352515049: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.2093663912: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.9931639629: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.7268646057: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.6427915519: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.745842261: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.7133965922: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.5454545455: [0.0, 
0.3333333333333, 0.6666666666667], 0.6160595858: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.1459034792: [0.4444444444444, 0.5555555555556], 0.5187225793: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.0344862769: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.0583613917: [0.8888888888889, 0.1111111111111], 0.5193347618: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.9099071523: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.8450158147: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.6115702479: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.0467299255: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.8933782267: [0.4444444444444, 0.5555555555556], 0.617896133: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1544740333: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.5530047954: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.2793592491: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.6968676666: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.5836139169: [0.8888888888889, 0.1111111111111], 0.4489337823: [0.4444444444444, 0.5555555555556], 0.8650137741: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1624324049: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.8144066932: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.1146821753: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.5793286399: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.3709825528: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 
0.3938373635: [0.1010101010101, 0.8989898989899, 0.989898989899, 0.010101010101], 0.3613916947: [0.1111111111111, 0.8888888888889], 0.0495867769: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.5719824508: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.1746760535: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.5013774105: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.2365064789: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.1189674523: [0.8888888888889, 0.1111111111111], 0.4109784716: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.0018365473: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2811957963: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.9429650036: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.2516069789: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3305785124: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.5181103969: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.9545964697: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.8888888889: [0.0, 0.3333333333333, 0.6666666666667], 0.2479338843: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8943985308: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.7970615243: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2873176207: [0.4444444444444, 0.5555555555556], 0.1505968779: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 
0.3030303030303], 0.6729925518: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.5050505051: [0.0, 0.3333333333333, 0.6666666666667], 0.5756555454: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.5432098765: [0.8888888888889, 0.1111111111111], 0.2240587695: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5934088358: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.9019487807: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.2254871952: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.8046117743: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.1930415264: [0.2222222222222, 0.7777777777778], 0.7072747679: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.101010101: [0.0, 0.6666666666667, 0.3333333333333], 0.5774920926: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.5450464238: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.1281501888: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.5046423834: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.2591572289: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.9954086318: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5236200388: [0.040404040404, 0.5959595959596, 0.4040404040404, 0.959595959596], 0.8786858484: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.2340577492: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.2016120804: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.7740026528: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.438526681: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.1691664116: 
[0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6442199776: [0.8888888888889, 0.1111111111111], 0.3736353433: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0852974186: [0.4444444444444, 0.5555555555556], 0.8358330783: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.2762983369: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.0697887971: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2426283032: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.4991327416: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.0202020202: [0.0, 0.6666666666667, 0.3333333333333], 0.4881134578: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.1220283645: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.455667789: [0.2222222222222, 0.7777777777778], 0.8064483216: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.3907764514: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.0055096419: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6833996531: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.2609937761: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.5052545659: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.1954902561: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.4342414039: [0.1010101010101, 0.8989898989899, 0.989898989899, 0.010101010101], 0.9607182941: [0.2222222222222, 0.7777777777778], 0.9513314968: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.3754718906: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.5217834915: [0.2323232323232, 
0.7676767676768, 0.6767676767677, 0.3232323232323], 0.0179573513: [0.8888888888889, 0.1111111111111], 0.7566574839: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.189164371: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.0808080808: [0.0, 0.6666666666667, 0.3333333333333], 0.7323742475: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.5619834711: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.6001428426: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.125701459: [0.4444444444444, 0.5555555555556], 0.5028058361: [0.8888888888889, 0.1111111111111], 0.0314253648: [0.2222222222222, 0.7777777777778], 0.9123558821: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.684624018: [0.8888888888889, 0.1111111111111], 0.8558310376: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8615447403: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.8290990715: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.0826446281: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.7642077339: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.7317620651: [0.4444444444444, 0.5555555555556], 0.1667176819: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.5370880522: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1342720131: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.7874706663: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.2387511478: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.2401795735: [0.8888888888889, 0.1111111111111], 
0.7933884298: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8958269564: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.7660442812: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.7335986124: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.1752882359: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.636261606: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6038159371: [0.1111111111111, 0.8888888888889], 0.0883583308: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.3209876543: [0.1111111111111, 0.8888888888889], 0.2560963167: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.3534333231: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.2487501275: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.9625548413: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.9301091725: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.5950413223: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.0112233446: [0.2222222222222, 0.7777777777778], 0.4679114376: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.9696969697: [0.0, 0.3333333333333, 0.6666666666667], 0.4354657688: [0.2222222222222, 0.7777777777778], 0.0926436078: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.7066625855: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.8480767269: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.3085399449: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.987654321: [0.8888888888889, 0.1111111111111], 0.4701561065: [0.1212121212121, 0.4545454545455, 0.8787878787879, 
0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5364758698: [0.2222222222222, 0.7777777777778], 0.4850525457: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8448117539: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.7507397204: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.9917355372: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2277318641: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.3552698704: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.81359045: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.3228242016: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.7162534435: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9056218753: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.539536782: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.1303948577: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9135802469: [0.4444444444444, 0.5555555555556], 0.4697479849: [0.9292929292929, 0.7070707070707, 0.2929292929293, 0.0707070707071], 0.451382512: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.8486889093: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.2038567493: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9950005102: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.205285175: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.3103764922: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 
0.5151515151515], 0.6913580247: [0.4444444444444, 0.5555555555556], 0.1728395062: [0.2222222222222, 0.7777777777778], 0.6264666871: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.9141924293: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.7476788083: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.914600551: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.2971125395: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.0069380675: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.9203142536: [0.2222222222222, 0.7777777777778], 0.213855729: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.0534639323: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.1814100602: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.693194572: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.6380981533: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1489643914: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.5634118967: [0.8888888888889, 0.1111111111111], 0.939291909: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.3007856341: [0.1111111111111, 0.8888888888889], 0.9974492399: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.859708193: [0.2222222222222, 0.7777777777778], 0.222426283: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.55667789: [0.2222222222222, 0.7777777777778], 0.8248137945: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.4477094174: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.7274767881: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4152637486: [0.2222222222222, 0.7777777777778], 0.9490868279: 
[0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.404040404: [0.0, 0.6666666666667, 0.3333333333333], 0.350372411: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.3705744312: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.024691358: [0.4444444444444, 0.5555555555556], 0.956433017: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.8333843485: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.9337822671: [0.4444444444444, 0.5555555555556], 0.4648505255: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.6980920314: [0.2222222222222, 0.7777777777778], 0.8009386797: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.668503214: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.0008162432: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.7731864096: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.3026221814: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.1689623508: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4495459647: [0.9292929292929, 0.7070707070707, 0.2929292929293, 0.0707070707071], 0.2424242424: [0.0, 0.6666666666667, 0.3333333333333], 0.9362309968: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.0661157025: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.7750229568: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4440363228: [0.020202020202, 0.979797979798, 0.7979797979798, 0.2020202020202], 0.290174472: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6509539843: [0.4444444444444, 0.5555555555556], 
0.1465156617: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.0271400877: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.947862463: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.411590654: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.2185491276: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8686868687: [0.0, 0.3333333333333, 0.6666666666667], 0.7768595041: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2199775533: [0.8888888888889, 0.1111111111111], 0.8150188756: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.6336088154: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6852362004: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.1550862157: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.5554535252: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.5230078563: [0.1111111111111, 0.8888888888889], 0.2805836139: [0.8888888888889, 0.1111111111111], 0.4866850321: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.1679420467: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.0142842567: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.2285481073: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.8817467605: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.2677277829: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.1961024385: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.7519640853: [0.4444444444444, 0.5555555555556], 0.6870727477: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 
0.0987654321: [0.8888888888889, 0.1111111111111], 0.6497296194: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.8778696051: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3301703908: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.7207427813: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.5419855117: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.5068870523: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8327721661: [0.4444444444444, 0.5555555555556], 0.9160289766: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.5095398429: [0.4444444444444, 0.5555555555556], 0.4446485053: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.0130598918: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.0908070605: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.31486583: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.0706050403: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6354453627: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9276604428: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.5585144373: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.0614223038: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.9705132129: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.7862463014: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.9325579023: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.1073359861: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4293439445: [0.9292929292929, 0.7070707070707, 0.2929292929293, 
0.0707070707071], 0.3968982757: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.5842260994: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.0091827365: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.0459136823: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.2699724518: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6105499439: [0.4444444444444, 0.5555555555556], 0.0381593715: [0.8888888888889, 0.1111111111111], 0.5456586063: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.838281808: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.9311294766: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8337924702: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.4244464851: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.5591266197: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.8395061728: [0.2222222222222, 0.7777777777778], 0.1936537088: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.16120804: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.5799408224: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.1287623712: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.846852362: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.6527905316: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.5891235588: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.4664830119: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.0606060606: [0.0, 
0.6666666666667, 0.3333333333333], 0.7327823691: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5970819304: [0.2222222222222, 0.7777777777778], 0.8413427201: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.2022242628: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.7115600449: [0.4444444444444, 0.5555555555556], 0.2089582696: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.1018263443: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.3748597082: [0.2222222222222, 0.7777777777778], 0.516886032: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.3099683706: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.9092949699: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.0608101214: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.2432404857: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.8756249362: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.4893378227: [0.4444444444444, 0.5555555555556], 0.178349148: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.1061116213: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2946638098: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.0736659525: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.262218141: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.1487603306: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8301193756: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9503111927: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.7676767677: [0.0, 0.3333333333333, 0.6666666666667], 0.4091419243: 
[0.9292929292929, 0.7070707070707, 0.2929292929293, 0.0707070707071], 0.3766962555: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.3442505867: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.694214876: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.9729619427: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.1263136415: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.49117437: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4940312213: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8907254362: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.1983471074: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.6960514233: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2993572084: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.7342107948: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.6044281196: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.1348841955: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.6483011938: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2830323436: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.7997143149: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.5768799102: [0.2222222222222, 0.7777777777778], 0.8576675849: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.4462809917: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 
0.5757575757576], 0.2407917559: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.060197939: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.2083460871: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.6583001735: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.6864605652: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.1759004183: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.6711560045: [0.4444444444444, 0.5555555555556], 0.6062646669: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.3644526069: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.0724415876: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.3232323232: [0.0, 0.6666666666667, 0.3333333333333], 0.24936231: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.9001122334: [0.2222222222222, 0.7777777777778], 0.8352208958: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.4691358025: [0.4444444444444, 0.5555555555556], 0.7054382206: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.4042444649: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.2995612693: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.7238036935: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.2744617896: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.5546372819: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.0399959188: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.3240485665: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.5150494847: [0.1010101010101, 0.8989898989899, 0.989898989899, 0.010101010101], 0.3889399041: [0.9292929292929, 0.2929292929293, 0.7070707070707, 
0.0707070707071], 0.3564942353: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.0810121416: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.4275073972: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.1634527089: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.6538108356: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.1177430874: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.4709723498: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4738292011: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8503213958: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.7529843893: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.9998979696: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6556473829: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2791551882: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1410060198: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.8872564024: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.9494949495: [0.0, 0.3333333333333, 0.6666666666667], 0.6338128762: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.7409448016: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.1065197429: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.9227629834: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.7548209366: [0.4848484848485, 0.8484848484848, 
0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.7929803081: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.7605346393: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.1820222426: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.0455055607: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.6307519641: [0.4444444444444, 0.5555555555556], 0.5658606265: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8154269972: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.2695643302: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.5640240792: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.943577186: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.9894908683: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.9570451995: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.8921538619: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.2230384655: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.5787164575: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.7948168554: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.1122334456: [0.2222222222222, 0.7777777777778], 0.1581471278: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.6325885114: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.9527599225: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.2542597694: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.5215794307: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1285583104: [0.4848484848485, 0.8484848484848, 0.8181818181818, 
0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6060606061: [0.0, 0.3333333333333, 0.6666666666667], 0.5101520253: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.4985205591: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.9692888481: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.4334251607: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3687378839: [0.0707070707071, 0.7070707070707, 0.2929292929293, 0.9292929292929], 0.3362922151: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.0840730538: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.3038465463: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.8223650648: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.6134067952: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.7256402408: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.1208039996: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.4507703296: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4183246607: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.4536271809: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8099173554: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.1781450872: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.6607489032: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.6152433425: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.258953168: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 
0.2727272727273], 0.5958575656: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.7654320988: [0.8888888888889, 0.1111111111111], 0.3350678502: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.8172635445: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.9699010305: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.9090909091: [0.0, 0.3333333333333, 0.6666666666667], 0.7611468218: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.4058769513: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.882358943: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.7144168962: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.1881440669: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.7201305989: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.6876849301: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.1556983981: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.5903479237: [0.4444444444444, 0.5555555555556], 0.5254565861: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.1734516886: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.056932966: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.9815324967: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.7752270177: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.2291602898: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.8517498214: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.8193041526: [0.2222222222222, 0.7777777777778], 0.754412815: [0.7474747474747, 
0.5252525252525, 0.2525252525253, 0.4747474747475], 0.3638404244: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.5597388022: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.0063258851: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.9898989899: [0.0, 0.3333333333333, 0.6666666666667], 0.6887052342: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5315784104: [0.6464646464646, 0.5353535353535, 0.3535353535354, 0.4646464646465], 0.7121722273: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.9031731456: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.8535863687: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.4783185389: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.7886950311: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.6129986736: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.7807366595: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.0871339659: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3485358637: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.3160901949: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.2836445261: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.08815427: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.1432506887: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9221508009: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.3093561881: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.4305683094: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3981226405: 
[0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.8493010917: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.1083562902: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.3332313029: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.769513315: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8356290174: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6721763085: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5748393021: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.6785021937: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.7923681257: [0.4444444444444, 0.5555555555556], 0.613610856: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6601367207: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.3691460055: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.4260789715: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.6740128558: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.2883379247: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6797265585: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.1618202224: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.5487195184: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.5499438833: [0.4444444444444, 0.5555555555556], 0.8076726865: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 
0.870523416: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2352821141: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.9086827875: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.8762371187: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.811345781: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.2028364453: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.7140087746: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.1379451076: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.0859096011: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.6631976329: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.0044893378: [0.4444444444444, 0.5555555555556], 0.2497704316: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8419549026: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.8131823283: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.4581165187: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.7482909907: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.7672686461: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.3283338435: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2958881747: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.2634425059: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.6644219978: [0.8888888888889, 0.1111111111111], 0.5325987144: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.442811958: [0.040404040404, 0.5959595959596, 0.4040404040404, 0.959595959596], 0.4103662892: 
[0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3779206203: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.0944801551: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.4132231405: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3130292827: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.7291092746: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.157943067: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.6317722681: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6464646465: [0.0, 0.3333333333333, 0.6666666666667], 0.5344352617: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.4923987348: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.842567085: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.020406081: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.2314049587: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8282828283: [0.0, 0.3333333333333, 0.6666666666667], 0.3654729109: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8584838282: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.1910009183: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.7452300786: [0.8888888888889, 0.1111111111111], 0.2681359045: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6393225181: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.6068768493: [0.1717171717172, 
0.7171717171717, 0.8282828282828, 0.2828282828283], 0.1354963779: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.0338740945: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.9292929293: [0.0, 0.3333333333333, 0.6666666666667], 0.2075298439: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.9007244159: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.6540148964: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.0522395674: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.7709417406: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.7384960718: [0.2222222222222, 0.7777777777778], 0.6736047342: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.5113763902: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.8080808081: [0.0, 0.3333333333333, 0.6666666666667], 0.7525762677: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.5505560657: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.971533517: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2499744924: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.9674523008: [0.8888888888889, 0.1111111111111], 0.9025609632: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.310580553: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.7727782879: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.1850831548: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.7078869503: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.6429956127: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.7819610244: 
[0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.0275482094: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.3081318233: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2756861545: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.7346189164: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8731762065: [0.4444444444444, 0.5555555555556], 0.0002040608: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.6809509234: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.9368431793: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.8719518416: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.4226099378: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.0975410672: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.390164269: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3577186001: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.9901030507: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.2928272625: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0650953984: [0.4444444444444, 0.5555555555556], 0.2603815937: [0.8888888888889, 0.1111111111111], 0.5913682277: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.0422405877: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9990817264: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.3840424446: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.0332619121: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 
0.4721967146: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.407305377: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.8852157943: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.7878787879: [0.0, 0.3333333333333, 0.6666666666667], 0.3452708907: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.0863177227: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.051627385: [0.2222222222222, 0.7777777777778], 0.593204775: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6923783287: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6313641465: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.1416182022: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.5664728089: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.5015814713: [0.3232323232323, 0.2323232323232, 0.6767676767677, 0.7676767676768], 0.0265279053: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.1230486685: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.7305377002: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.7897153352: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.2150800939: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.3436384042: [0.1414141414141, 0.8585858585859, 0.5858585858586, 0.4141414141414], 0.7954290379: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1826344251: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.512600755: [0.949494949495, 
0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6332006938: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.5034180186: [0.040404040404, 0.5959595959596, 0.4040404040404, 0.959595959596], 0.7795122947: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.9862258953: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.2222222222: [0.0, 0.6666666666667, 0.3333333333333], 0.363228242: [0.020202020202, 0.979797979798, 0.7979797979798, 0.2020202020202], 0.9270482604: [0.8888888888889, 0.1111111111111], 0.8621569228: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.9423528211: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.5142332415: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.6999285787: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.6674829099: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.6025915723: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.5701459035: [0.4444444444444, 0.5555555555556], 0.9496990103: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8603203755: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.2879298031: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2554841343: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.9613304765: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1212121212: [0.0, 0.6666666666667, 0.3333333333333], 0.8964391389: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.8315478012: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.1997755331: [0.8888888888889, 0.1111111111111], 0.1006019794: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 
0.3699622488: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.0293847567: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.2726252423: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0681563106: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1869197021: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.1377410468: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8939904091: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.8058361392: [0.8888888888889, 0.1111111111111], 0.4844403632: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.7011529436: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.4519946944: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.3507805326: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.3871033568: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.2112029385: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.8750127538: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.7474747475: [0.0, 0.3333333333333, 0.6666666666667], 0.3250688705: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5528007346: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.5854504642: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.5260687685: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.9594939292: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4719926538: [0.4848484848485, 0.8484848484848, 0.8181818181818, 
0.1818181818182, 0.1515151515152, 0.5151515151515], 0.1873278237: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.8199163351: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.6519742883: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.1887562494: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.6901336598: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.657687991: [0.2222222222222, 0.7777777777778], 0.5927966534: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.9458218549: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.4242424242: [0.0, 0.3333333333333, 0.6666666666667], 0.9631670238: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.2297724722: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.88664422: [0.8888888888889, 0.1111111111111], 0.741556984: [0.1616161616162, 0.8383838383838, 0.6161616161616, 0.3838383838384], 0.0412202836: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.6919702071: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.1648811346: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.6270788695: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.5621875319: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.1324354658: [0.2222222222222, 0.7777777777778], 0.5860626467: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.0669319457: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.9209264361: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.8884807673: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.8560350985: [0.949494949495, 
0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.7911437608: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.758698092: [0.2222222222222, 0.7777777777778], 0.3822058974: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.3497602285: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.3636363636: [0.0, 0.6666666666667, 0.3333333333333], 0.0404040404: [0.0, 0.6666666666667, 0.3333333333333], 0.9766350372: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.2524232221: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6078971534: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5105601469: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.2469135802: [0.4444444444444, 0.5555555555556], 0.4966840118: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.464238343: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.4317926742: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1079481686: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.4587287012: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.3669013366: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.3344556678: [0.2222222222222, 0.7777777777778], 0.7070707071: [0.0, 0.3333333333333, 0.6666666666667], 0.0136720743: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.7280889705: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.5123966942: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.0638710336: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.4517906336: [0.4848484848485, 0.8484848484848, 0.8181818181818, 
0.1818181818182, 0.1515151515152, 0.5151515151515], 0.8529741863: [0.4444444444444, 0.5555555555556], 0.7170696868: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.7089072544: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9772472197: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1948780737: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.714620957: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.0302009999: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.6172839506: [0.2222222222222, 0.7777777777778], 0.552392613: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.6742169166: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.8799102132: [0.2222222222222, 0.7777777777778], 0.0210182634: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.9054178145: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.9760228548: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.202020202: [0.0, 0.6666666666667, 0.3333333333333], 0.6668707275: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8462401796: [0.8888888888889, 0.1111111111111], 0.781348842: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.7991021324: [0.2222222222222, 0.7777777777778], 0.1414141414: [0.0, 0.6666666666667, 0.3333333333333], 0.6515661667: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.6191204979: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.5866748291: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.2322212019: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.2965003571: [0.0808080808081, 0.8080808080808, 
0.1919191919192, 0.9191919191919], 0.00755025: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.8805223957: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.2120191817: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.8156310581: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.0448933782: [0.4444444444444, 0.5555555555556], 0.1795735129: [0.8888888888889, 0.1111111111111], 0.653402714: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.3620038772: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.3295582083: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.0742781349: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.0387715539: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.9796959494: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.9472502806: [0.8888888888889, 0.1111111111111], 0.2205897357: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.4764819916: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1110090807: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0277522702: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.4024079176: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.3466993164: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.7640036731: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.0785634119: [0.8888888888889, 0.1111111111111], 0.0477502296: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.6666666667: [0.0, 0.3333333333333, 0.6666666666667], 0.7574737272: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.2846648301: 
[0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5517804306: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.1152943577: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.4611774309: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.4287317621: [0.4444444444444, 0.5555555555556], 0.3375165799: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.4315886134: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.9288848077: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.7550249974: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1671258035: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.5711662075: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.7348229772: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.1685542292: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.609325579: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.0253035405: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.5119885726: [0.5252525252525, 0.4747474747475, 0.2525252525253, 0.7474747474747], 0.1195796347: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.4758698092: [0.2222222222222, 0.7777777777778], 0.4811753903: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.795224977: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.4177124783: [0.0808080808081, 0.1919191919192, 0.8080808080808, 0.9191919191919], 0.3838383838: [0.0, 0.6666666666667, 0.3333333333333], 0.209570452: [0.949494949495, 
0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6374859708: [0.2222222222222, 0.7777777777778], 0.9752066116: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6111621263: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.1446791144: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.5462707887: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.0571370268: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.5509641873: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6938067544: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.0073461892: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.7678808285: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.8401183553: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.3215998368: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.71033568: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.6778900112: [0.2222222222222, 0.7777777777778], 0.1532496684: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.341801857: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.077339047: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.2769105193: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.6758494031: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.7684930109: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.2358942965: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.22671156: [0.4444444444444, 0.5555555555556], 0.9068462402: [0.8888888888889, 0.1111111111111], 
0.7844097541: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.4562799714: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.1140699929: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.4238343026: [0.020202020202, 0.979797979798, 0.7979797979798, 0.2020202020202], 0.6056524844: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.3913886338: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.081624324: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.3264972962: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.2940516274: [0.2222222222222, 0.7777777777778], 0.6262626263: [0.0, 0.3333333333333, 0.6666666666667], 0.2644628099: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.3950617284: [0.2222222222222, 0.7777777777778], 0.8897051321: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.4409754107: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.4085297419: [0.4444444444444, 0.5555555555556], 0.1028466483: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.9405162739: [0.2222222222222, 0.7777777777778], 0.0257116621: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.6399347005: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.6280991736: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.5307621671: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.0436690134: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.1422303847: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.0771349862: [0.0606060606061, 0.9393939393939, 
0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.8988878686: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.4609733701: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.9411284563: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.8246097337: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.1818181818: [0.0, 0.6666666666667, 0.3333333333333], 0.354657688: [0.2222222222222, 0.7777777777778], 0.7005407611: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.8015508622: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.5707580859: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.5383124171: [0.8686868686869, 0.1313131313131, 0.3131313131313, 0.6868686868687], 0.5058667483: [0.1717171717172, 0.7171717171717, 0.8282828282828, 0.2828282828283], 0.6227935925: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.9943883277: [0.4444444444444, 0.5555555555556], 0.9294969901: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.1918171615: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.0479542904: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.6699316396: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.1593714927: [0.8888888888889, 0.1111111111111], 0.5725946332: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.0803999592: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.2891541679: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.2567084991: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.8670543822: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.996224875: [0.4646464646465, 0.5353535353535, 
0.3535353535354, 0.6464646464646], 0.9637792062: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.4444444444: [0.0, 0.3333333333333, 0.6666666666667], 0.8664421998: [0.8888888888889, 0.1111111111111], 0.2003877155: [0.040404040404, 0.959595959596, 0.4040404040404, 0.5959595959596], 0.4360779512: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.4036322824: [0.020202020202, 0.979797979798, 0.7979797979798, 0.2020202020202], 0.3711866136: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505]}
averages_odd={0.824507703: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.805325987: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.292929293: [0.0, 0.3333333333333, 0.6666666666667], 0.843485359: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.778594021: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.713702683: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.681257015: [0.4444444444444, 0.5555555555556], 0.192735435: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.372104887: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.339659218: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.188450158: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.913274156: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.34251607: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.834914805: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.46087134: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.845321906: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.648199163: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.650647893: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.585756555: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.091113152: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.421691664: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.356800326: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.233139476: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.29476584: [0.2424242424242, 0.9090909090909, 0.4242424242424, 
0.7575757575758, 0.0909090909091, 0.5757575757576], 0.363534333: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.879604122: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.420467299: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.935720845: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.365370881: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.81103969: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.095398429: [0.4444444444444, 0.5555555555556], 0.208448118: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.858585859: [0.0, 0.6666666666667, 0.3333333333333], 0.663911846: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.09825528: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.920008162: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.35006632: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.317620651: [0.4444444444444, 0.5555555555556], 0.067238037: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.616365677: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.665748393: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.703907765: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.412508928: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.367207428: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.95959596: [0.0, 0.6666666666667, 0.3333333333333], 0.34761759: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.071523314: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 
0.764921947: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.272727273: [0.0, 0.3333333333333, 0.6666666666667], 0.738189981: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.640852974: [0.4444444444444, 0.5555555555556], 0.932863993: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.575961637: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.828180798: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.319457198: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.287011529: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.961432507: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.8640955: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.32231405: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.239873482: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.724109785: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.804917866: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.138251199: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.642689521: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.0580553: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.809815325: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.512906846: [0.8888888888889, 0.1111111111111], 0.304152637: [0.2222222222222, 0.7777777777778], 0.552086522: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.898989899: [0.0, 0.6666666666667, 0.3333333333333], 0.524334252: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 
0.6363636363636, 0.3030303030303], 0.956126926: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.87164575: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.27456382: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.179267422: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.741863075: [0.4444444444444, 0.5555555555556], 0.312723192: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.280277523: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.421487603: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.970819304: [0.2222222222222, 0.7777777777778], 0.910213244: [0.2222222222222, 0.7777777777778], 0.102744618: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.882052852: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.062340577: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.3298643: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.297418631: [0.4444444444444, 0.5555555555556], 0.47107438: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.722681359: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.63717988: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.880624426: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.625344353: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.533721049: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.131517192: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.347005408: [0.0808080808081, 0.8080808080808, 
0.1919191919192, 0.9191919191919], 0.919191919: [0.0, 0.6666666666667, 0.3333333333333], 0.726354454: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.579022549: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.724517906: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.683093562: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.002958882: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.252525253: [0.0, 0.3333333333333, 0.6666666666667], 0.13865932: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.461483522: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.535557596: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.111315172: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.973268034: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.331700847: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.299255178: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.266809509: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.397204367: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.861850832: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.057443118: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.446178961: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.569839812: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.248852158: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.017447199: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.048260382: [0.8888888888889, 
0.1111111111111], 0.316396286: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.283950617: [0.2222222222222, 0.7777777777778], 0.987144169: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.673298643: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.955922865: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.83124171: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.2543618: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.043362922: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.495765738: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.636567697: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.030507091: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.260075503: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.087440057: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.401285583: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.906540149: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.226405469: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.766758494: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.028058361: [0.8888888888889, 0.1111111111111], 0.865523926: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.452300786: [0.8888888888889, 0.1111111111111], 0.763901643: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.091725334: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.45087236: [0.0606060606061, 0.9393939393939, 
0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.682277319: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.601061116: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.405774921: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.716763596: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.424140394: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.107642077: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.878787879: [0.0, 0.6666666666667, 0.3333333333333], 0.912049791: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.825528007: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.063564942: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.488419549: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.814712784: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.6573819: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.178655239: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.441281502: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.033568003: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.379247016: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.279053158: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.886338129: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.635955515: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.844709723: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.219671462: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 
0.594327109: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.154780124: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.425976941: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.393531272: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.059687787: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.587593103: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.859402102: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.02009999: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.953066014: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.482093664: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.823283338: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.758392001: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.122946638: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.661054994: [0.4444444444444, 0.5555555555556], 0.475563718: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.41067238: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.791449852: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.733904704: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.03785328: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.381083563: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.542699725: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.98734823: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.750433629: [0.6868686868687, 0.3131313131313, 
0.8686868686869, 0.1313131313131], 0.386797266: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.69533721: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.023160902: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.427813488: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.739210285: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.333333333: [0.0, 0.3333333333333, 0.6666666666667], 0.762677278: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.653096623: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.924293439: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.403938374: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.371492705: [0.8888888888889, 0.1111111111111], 0.553923069: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.885113764: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.777369656: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.309458219: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.643709826: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.959187838: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.519640853: [0.4444444444444, 0.5555555555556], 0.421079482: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.897357413: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.014590348: [0.4444444444444, 0.5555555555556], 0.751045812: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.922456892: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.010917253: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.742883379: 
[0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.261707989: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.924701561: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.035812672: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.683705744: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.013365983: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.120293848: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.5214774: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.373329252: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.340883583: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.228650138: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.939393939: [0.0, 0.6666666666667, 0.3333333333333], 0.588817468: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.977553311: [0.8888888888889, 0.1111111111111], 0.912661973: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.556371799: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.165187226: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.71798796: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.965921845: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.620650954: [0.4444444444444, 0.5555555555556], 0.555759616: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.39047036: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.111927354: [0.2828282828283, 0.1717171717172, 0.8282828282828, 
0.7171717171717], 0.406999286: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.979389858: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.360881543: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.946944189: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.206203449: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.849607183: [0.2222222222222, 0.7777777777778], 0.784715845: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.141312111: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.842873176: [0.4444444444444, 0.5555555555556], 0.334149577: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.735741251: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.180899908: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.4912764: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.313131313: [0.0, 0.3333333333333, 0.6666666666667], 0.670849913: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.818998061: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.149882665: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.40312213: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.351290685: [0.8888888888889, 0.1111111111111], 0.009692888: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.460055096: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.688603204: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.807978778: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.603305785: [0.4848484848485, 
0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.157024793: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.993470054: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.605958576: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.684317927: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.158453219: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.508621569: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.271094786: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.001734517: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.201101928: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.702479339: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.605142332: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.708193042: [0.2222222222222, 0.7777777777778], 0.643301704: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.293745536: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.134578104: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.073972044: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.353127232: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.320681563: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.255790225: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.817773697: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.872257933: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.710029589: [0.6868686868687, 0.3131313131313, 
0.8686868686869, 0.1313131313131], 0.467605346: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.612692582: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.515355576: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.143148658: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.337822671: [0.4444444444444, 0.5555555555556], 0.938985818: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.841648811: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.509233752: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.484746454: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.601469238: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.932251811: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.662891542: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.390266299: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.617998163: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.568615447: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.561881441: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.996530966: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.266197327: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.845730028: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.158861341: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.656157535: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.802469136: [0.4444444444444, 0.5555555555556], 0.562901745: [0.4848484848485, 
0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.197020712: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.127844098: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.536169779: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.250892766: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.759412305: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.662075298: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.765126008: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.109478625: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.686766656: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.495153556: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.136414652: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.757167636: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.957963473: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.619426589: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.332925212: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.300479543: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.782267116: [0.4444444444444, 0.5555555555556], 0.008468524: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.122334456: [0.2222222222222, 0.7777777777778], 0.91694725: [0.8888888888889, 0.1111111111111], 0.246607489: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.669625548: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.181716151: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.144985206: 
[0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.447403326: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.414957657: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.116620753: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.252729313: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.763085399: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.852055913: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.513519029: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.797979798: [0.0, 0.3333333333333, 0.6666666666667], 0.118049179: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.190286705: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.464544434: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.432098765: [0.8888888888889, 0.1111111111111], 0.053770023: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.661667177: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.370064279: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.160697888: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.931027446: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.674523008: [0.8888888888889, 0.1111111111111], 0.770635649: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.198857259: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.481685542: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.129680645: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.996326905: [0.0606060606061, 0.9393939393939, 0.3939393939394, 
0.7272727272727, 0.6060606060606, 0.2727272727273], 0.814508724: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.560044893: [0.4444444444444, 0.5555555555556], 0.419651056: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.522497704: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.999591878: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.136822773: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.994082237: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.425364759: [0.2222222222222, 0.7777777777778], 0.01010101: [0.0, 0.3333333333333, 0.6666666666667], 0.094174064: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.351902867: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.719008264: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.641465157: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.621671258: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.23415978: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.048872564: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.031119274: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.474951536: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.690439751: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.63473115: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.609019488: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.098459341: [0.949494949495, 0.0505050505051, 0.9494949494949, 
0.4949494949495, 0.5050505050505], 0.678604224: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.915518825: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.818181818: [0.0, 0.6666666666667, 0.3333333333333], 0.358024691: [0.4444444444444, 0.5555555555556], 0.028670544: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.85634119: [0.8888888888889, 0.1111111111111], 0.623507805: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.596775839: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.034792368: [0.4444444444444, 0.5555555555556], 0.394755637: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.652892562: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.070298949: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.820018365: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.300275482: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.192123253: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.825732068: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.023773084: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.663503724: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.760840731: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.444342414: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.411896745: [0.8888888888889, 0.1111111111111], 0.781042751: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.199265381: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 
0.1515151515152, 0.5151515151515], 0.043975105: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.349862259: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.056014692: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.957351291: [0.8888888888889, 0.1111111111111], 0.892459953: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.168248138: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.96469748: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.69778594: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.67513519: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.496786042: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.799816345: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.782879298: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.676767677: [0.0, 0.3333333333333, 0.6666666666667], 0.856953372: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.302112029: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.176818692: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.144373023: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.340271401: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.711253954: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.837975717: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.545352515: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.278236915: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 
0.320069381: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.581267218: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.298438935: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.185389246: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.522089583: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.494541373: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.32496684: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.875114784: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.945719825: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.777777778: [0.0, 0.6666666666667, 0.3333333333333], 0.946740129: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.583103765: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.336598306: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.161514131: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.129068462: [0.8888888888889, 0.1111111111111], 0.374553617: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.309662279: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.91939598: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.021324355: [0.2222222222222, 0.7777777777778], 0.779614325: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.280073462: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.785328028: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.687991021: 
[0.2222222222222, 0.7777777777778], 0.721661055: [0.4444444444444, 0.5555555555556], 0.623099684: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.170084685: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.137639016: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.391694725: [0.8888888888889, 0.1111111111111], 0.326803387: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.976124885: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.076216713: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.562493623: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.329660239: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.96714621: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.016426895: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.722273237: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.296194266: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.114376084: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.385572901: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.804305683: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.476584022: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.708805224: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.951229466: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.281910009: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.501887562: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.789001122: 
[0.2222222222222, 0.7777777777778], 0.908172635: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.118661361: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.774308744: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.87654321: [0.8888888888889, 0.1111111111111], 0.223956739: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.540863177: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.821446791: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.980002041: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.050505051: [0.0, 0.3333333333333, 0.6666666666667], 0.30476482: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.272319151: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.090500969: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.834710744: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.737373737: [0.0, 0.3333333333333, 0.6666666666667], 0.232527293: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.163350679: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.13090501: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.548413427: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.953678196: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.107438017: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.904091419: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.454545455: [0.0, 0.3333333333333, 0.6666666666667], 0.354351597: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.289460259: [0.2323232323232, 
0.7676767676768, 0.6767676767677, 0.3232323232323], 0.25701459: [0.4444444444444, 0.5555555555556], 0.986123865: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.259871442: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.208652178: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.171921233: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.68003265: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.468829711: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.582695643: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.748597082: [0.2222222222222, 0.7777777777778], 0.67758392: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.755943271: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.306601367: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.179063361: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.908988879: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.811651872: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.217222732: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.148046118: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.616977859: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.250280584: [0.8888888888889, 0.1111111111111], 0.456382002: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.291296806: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.878379757: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.845934088: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.225793286: [0.0808080808081, 
0.8080808080808, 0.1919191919192, 0.9191919191919], 0.078257321: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.156616672: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.981634527: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.768799102: [0.2222222222222, 0.7777777777778], 0.597796143: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.500459137: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.131313131: [0.0, 0.3333333333333, 0.6666666666667], 0.70023467: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.50617284: [0.2222222222222, 0.7777777777778], 0.439444955: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.580859096: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2845628: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.252117131: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.97979798: [0.0, 0.6666666666667, 0.3333333333333], 0.696969697: [0.0, 0.3333333333333, 0.6666666666667], 0.502295684: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.870421386: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.508009387: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.241505969: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.269258239: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.096418733: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.210488726: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.825119886: [0.2525252525253, 0.5252525252525, 0.4747474747475, 
0.7474747474747], 0.698806244: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.971431487: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.704519947: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.504132231: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.448627691: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.03030303: [0.0, 0.3333333333333, 0.6666666666667], 0.383736353: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.102132435: [0.2222222222222, 0.7777777777778], 0.868584838: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.286399347: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.21905928: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.933476176: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.807162534: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.346393225: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.771247832: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.902254872: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.239669421: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.498214468: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.414141414: [0.0, 0.3333333333333, 0.6666666666667], 0.400877461: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.630445873: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.436179982: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.850219365: [0.2626262626263, 0.7373737373737, 0.3737373737374, 
0.6262626262626], 0.72043669: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.338842975: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.195184165: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.072543618: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.74063871: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.054994388: [0.4444444444444, 0.5555555555556], 0.891847771: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.001122334: [0.2222222222222, 0.7777777777778], 0.110702989: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.729007244: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.133149679: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.557392103: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.803081318: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.203754719: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.17130905: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.966942149: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.474747475: [0.0, 0.3333333333333, 0.6666666666667], 0.394143455: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.923681257: [0.4444444444444, 0.5555555555556], 0.948576676: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.744923987: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.26436078: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.906336088: [0.4848484848485, 0.8484848484848, 0.8181818181818, 
0.1818181818182, 0.1515151515152, 0.5151515151515], 0.890011223: [0.2222222222222, 0.7777777777778], 0.656565657: [0.0, 0.3333333333333, 0.6666666666667], 0.340679522: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.212325273: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.298030813: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.378838894: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.647586981: [0.2222222222222, 0.7777777777778], 0.219467401: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.050096929: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.658402204: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.08070605: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.561065197: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.006631976: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.664115907: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.100908071: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.156004489: [0.4444444444444, 0.5555555555556], 0.428425671: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.331088664: [0.8888888888889, 0.1111111111111], 0.854912764: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.269054178: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.795735129: [0.8888888888889, 0.1111111111111], 0.730843791: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.555555556: [0.0, 0.3333333333333, 0.6666666666667], 0.105193348: [0.1616161616162, 0.3838383838384, 
0.6161616161616, 0.8383838383838], 0.164575043: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.445566779: [0.2222222222222, 0.7777777777778], 0.380675441: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.860422406: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.627997143: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.762065095: [0.4444444444444, 0.5555555555556], 0.318640955: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.866136109: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.991633507: [0.2626262626263, 0.7373737373737, 0.6262626262626, 0.3737373737374], 0.830017345: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.797571676: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.068462402: [0.8888888888889, 0.1111111111111], 0.667789001: [0.2222222222222, 0.7777777777778], 0.638200184: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.077032956: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.583307826: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.5667789: [0.2222222222222, 0.7777777777778], 0.079889807: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.838383838: [0.0, 0.6666666666667, 0.3333333333333], 0.831853892: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.04030201: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.026833997: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.373941435: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.989796959: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.684113866: 
[0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.006427916: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.713498623: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.616161616: [0.0, 0.3333333333333, 0.6666666666667], 0.024385267: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.320477502: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.502499745: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.767574737: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.730231609: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.358636874: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.326191205: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.221303949: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.021936537: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.423324151: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.883277217: [0.4444444444444, 0.5555555555556], 0.485970819: [0.2222222222222, 0.7777777777778], 0.157841037: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.520661157: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.623711866: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.044587287: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.52637486: [0.2222222222222, 0.7777777777778], 0.632894603: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.408223651: [0.1616161616162, 0.3838383838384, 
0.6161616161616, 0.8383838383838], 0.343332313: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.310886644: [0.8888888888889, 0.1111111111111], 0.439853076: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.197428834: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.600448934: [0.4444444444444, 0.5555555555556], 0.035404551: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.595959596: [0.0, 0.3333333333333, 0.6666666666667], 0.235588205: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.092745638: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.755331089: [0.8888888888889, 0.1111111111111], 0.649423528: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.864911744: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.560657076: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.528211407: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.360473421: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.173553719: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.984287318: [0.4444444444444, 0.5555555555556], 0.244158759: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.968778696: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.174982145: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.142536476: [0.2222222222222, 0.7777777777778], 0.440669319: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.926129987: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.34516886: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 
0.714927048: [0.8888888888889, 0.1111111111111], 0.744311805: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.478012448: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.888786858: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.399449036: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.183552699: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.15110703: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.992857872: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.494949495: [0.0, 0.3333333333333, 0.6666666666667], 0.65983063: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.359044995: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.82369146: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.121110091: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.666564636: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.037649219: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.338434854: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.305989185: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.273543516: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.017039078: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.777981839: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.577594123: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.742475258: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.832466075: [0.1616161616162, 0.3838383838384, 0.6161616161616, 
0.8383838383838], 0.615753495: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.518416488: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.135802469: [0.4444444444444, 0.5555555555556], 0.764513825: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.323130293: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.290684624: [0.8888888888889, 0.1111111111111], 0.237424753: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.046831956: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.774104683: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.812264055: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.747372717: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.486583002: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.650035711: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.437608407: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.520253035: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.097234976: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.905927967: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.847566575: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.375573921: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.917559433: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.245995307: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.100091827: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.602285481: [0.4646464646465, 0.5353535353535, 0.3535353535354, 
0.6464646464646], 0.645546373: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.749209264: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.943883277: [0.4444444444444, 0.5555555555556], 0.454749515: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.58698092: [0.2222222222222, 0.7777777777778], 0.829405163: [0.2222222222222, 0.7777777777778], 0.592490562: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.069074584: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.292521171: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.222120192: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.978165493: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.983062953: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.848382818: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.815937149: [0.8888888888889, 0.1111111111111], 0.87899194: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.607182941: [0.2222222222222, 0.7777777777778], 0.205591266: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.827364555: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.632690542: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.230690746: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.198245077: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.007856341: [0.8888888888889, 0.1111111111111], 0.448015509: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.622487501: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.015610652: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 
0.5454545454545], 0.489031731: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.602897664: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.285787165: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.253341496: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.077645138: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.94449546: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.2392613: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.537190083: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.432710948: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.400265279: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.929190899: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.928374656: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.302928273: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.270482604: [0.8888888888889, 0.1111111111111], 0.213957759: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.177226814: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.733700643: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.636363636: [0.0, 0.3333333333333, 0.6666666666667], 0.215386185: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.182940516: [0.2222222222222, 0.7777777777778], 0.482297725: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.706968677: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.011937557: [0.2424242424242, 0.9090909090909, 0.4242424242424, 
0.7575757575758, 0.0909090909091, 0.5757575757576], 0.384960718: [0.2222222222222, 0.7777777777778], 0.717375778: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.032139578: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.355371901: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.479236813: [0.4444444444444, 0.5555555555556], 0.153351699: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.769411285: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.798796041: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.19151107: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.159065401: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.151515152: [0.0, 0.3333333333333, 0.6666666666667], 0.54657688: [0.2222222222222, 0.7777777777778], 0.789613305: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.070707071: [0.0, 0.3333333333333, 0.6666666666667], 0.555147434: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.872870115: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.937761453: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.869809203: [0.2222222222222, 0.7777777777778], 0.710641771: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.89674523: [0.8888888888889, 0.1111111111111], 0.108866442: [0.8888888888889, 0.1111111111111], 0.419242934: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.004183247: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.950617284: [0.2222222222222, 0.7777777777778], 0.715539231: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.939598: [0.1414141414141, 0.8585858585859, 0.4141414141414, 
0.5858585858586], 0.840220386: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.643913886: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.702071217: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.564738292: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.567391083: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.487807367: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.380063259: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.117436996: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.985307622: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.887970615: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.479848995: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.925517804: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.693296602: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.184777064: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.572288542: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.06050403: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.56922763: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.462095705: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.539842873: [0.4444444444444, 0.5555555555556], 0.364758698: [0.2222222222222, 0.7777777777778], 0.621263136: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.089276604: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.191919192: [0.0, 
0.3333333333333, 0.6666666666667], 0.988980716: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.335169881: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.798183859: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.668401184: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.603509846: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.538618508: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.414345475: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.670237731: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.816549332: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.168044077: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.976328946: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.238649117: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.201918172: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.169472503: [0.8888888888889, 0.1111111111111], 0.735129068: [0.8888888888889, 0.1111111111111], 0.399040914: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.366595245: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.097847158: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.055606571: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.964085297: [0.4444444444444, 0.5555555555556], 0.540455056: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.178043057: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.145597388: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.746556474: [0.969696969697, 0.030303030303, 
0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.050709111: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.483930211: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.531884502: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.217630854: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.041526375: [0.2222222222222, 0.7777777777778], 0.289256198: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.910825426: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.083766963: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.359861239: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.32741557: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.9848995: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.905315784: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.036628915: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.801244771: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.193755739: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.277216611: [0.4444444444444, 0.5555555555556], 0.626160596: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.593714927: [0.8888888888889, 0.1111111111111], 0.162738496: [0.2222222222222, 0.7777777777778], 0.377002347: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.344556678: [0.2222222222222, 0.7777777777778], 0.27966534: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.090909091: [0.0, 0.3333333333333, 0.6666666666667], 0.575349454: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 
0.855116825: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.822671156: [0.4444444444444, 0.5555555555556], 0.525150495: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.491480461: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.595551474: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.563105806: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.89919396: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.138863381: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.461891644: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.584940312: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.997755331: [0.8888888888889, 0.1111111111111], 0.986736047: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.942658912: [0.5656565656566, 0.6565656565657, 0.4343434343434, 0.3434343434343], 0.915722885: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.757575758: [0.0, 0.3333333333333, 0.6666666666667], 0.792062034: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.694725028: [0.8888888888889, 0.1111111111111], 0.62983369: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.500051015: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.898581777: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.064177125: [0.0808080808081, 0.1919191919192, 0.9191919191919, 0.8080808080808], 0.313947556: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.249056219: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.736965616: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.511478421: [0.0606060606061, 0.9393939393939, 
0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.067033976: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.237832874: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.858789919: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.639628609: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.05785124: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.493317008: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.696561575: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.257626773: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.01765126: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.48107336: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.225181104: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.893072135: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.542291603: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.960412203: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.904499541: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.30721355: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.232323232: [0.0, 0.3333333333333, 0.6666666666667], 0.599224569: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.415977961: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.612488522: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.515151515: [0.0, 0.3333333333333, 0.6666666666667], 0.112947658: [0.2424242424242, 
0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.132129375: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.454137333: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.553310887: [0.8888888888889, 0.1111111111111], 0.01520253: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.875318845: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.324354658: [0.2222222222222, 0.7777777777778], 0.25946332: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.171717172: [0.0, 0.3333333333333, 0.6666666666667], 0.949392919: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.242322212: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.209876543: [0.8888888888889, 0.1111111111111], 0.516988062: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.471278441: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.438832772: [0.4444444444444, 0.5555555555556], 0.522701765: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.952453831: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.895316804: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.441689624: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.727170697: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.946332007: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.147842057: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.784103663: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.218447097: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.186001428: 
[0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.149270483: [0.8888888888889, 0.1111111111111], 0.654320988: [0.8888888888889, 0.1111111111111], 0.58942965: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.140495868: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.526987042: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.393939394: [0.0, 0.3333333333333, 0.6666666666667], 0.980614223: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.836139169: [0.8888888888889, 0.1111111111111], 0.84389348: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.717171717: [0.0, 0.6666666666667, 0.3333333333333], 0.103968983: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.473114988: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.125395368: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.052341598: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.443526171: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.689827569: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.921028466: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.787776757: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.203142536: [0.2222222222222, 0.7777777777778], 0.463728191: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.075808591: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.111111111: [0.0, 0.3333333333333, 0.6666666666667], 0.590041832: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.081930415: [0.2222222222222, 0.7777777777778], 
0.572084481: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.36271809: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.890623406: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.610243853: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.433935313: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.401489644: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.465768799: [0.2222222222222, 0.7777777777778], 0.836751352: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.511682481: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.211713091: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.839200082: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.676971738: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.547189062: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.418630752: [0.4444444444444, 0.5555555555556], 0.514743394: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.353739414: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.534945414: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.084379145: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.867768595: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.998367514: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.95980002: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.087235996: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.673094582: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 
0.2727272727273], 0.743699622: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.558820529: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.613916947: [0.8888888888889, 0.1111111111111], 0.60963167: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.468217529: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.54902561: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.457810428: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.926742169: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.088664422: [0.8888888888889, 0.1111111111111], 0.373737374: [0.0, 0.3333333333333, 0.6666666666667], 0.972655851: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.228854199: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.940210183: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.19640853: [0.4444444444444, 0.5555555555556], 0.417406387: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.528823589: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.452912968: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.38802163: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.395775941: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.818385879: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.918783798: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.54167942: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.434343434: [0.0, 0.3333333333333, 0.6666666666667], 0.685950413: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.204979084: [0.1313131313131, 0.6868686868687, 0.3131313131313, 
0.8686868686869], 0.172533415: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.405162738: [0.2222222222222, 0.7777777777778], 0.863687379: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.212121212: [0.0, 0.3333333333333, 0.6666666666667], 0.064789307: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.531680441: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.115600449: [0.4444444444444, 0.5555555555556], 0.459646975: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.504948475: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.413733293: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.381287624: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.805530048: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.466380982: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.965105601: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.188246097: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.010305071: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.701459035: [0.4444444444444, 0.5555555555556], 0.189674523: [0.8888888888889, 0.1111111111111], 0.506785022: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.430874401: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.398428732: [0.4444444444444, 0.5555555555556], 0.333537394: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.877155392: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.529435772: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.962860933: [0.5656565656566, 0.6565656565657, 
0.4343434343434, 0.3434343434343], 0.930415264: [0.2222222222222, 0.7777777777778], 0.061728395: [0.2222222222222, 0.7777777777778], 0.127640037: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.535353535: [0.0, 0.3333333333333, 0.6666666666667], 0.124171003: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.573512907: [0.8888888888889, 0.1111111111111], 0.165799408: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.318232833: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.926538108: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.353535354: [0.0, 0.3333333333333, 0.6666666666667], 0.899806142: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.627384961: [0.2222222222222, 0.7777777777778], 0.1184573: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.026629936: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.737577798: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.607795123: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.542903785: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.096010611: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.36781961: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.243546577: [0.2222222222222, 0.7777777777778], 0.574125089: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.966534027: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.681869197: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.499438833: [0.4444444444444, 0.5555555555556], 0.771860014: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 
0.311498827: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.434547495: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.056830936: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.78328742: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.676359555: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.973880216: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.258034894: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.969594939: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.361085604: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.104581165: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.263748597: [0.2222222222222, 0.7777777777778], 0.93714927: [0.8888888888889, 0.1111111111111], 0.785123967: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.852668095: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.731455974: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.596163657: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.79083767: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.378226712: [0.4444444444444, 0.5555555555556], 0.693500663: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.313335374: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.884297521: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.786960514: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.895520865: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.23681257: 
[0.4444444444444, 0.5555555555556], 0.592286501: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.451076421: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.492704826: [0.8888888888889, 0.1111111111111], 0.565554535: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.533108866: [0.8888888888889, 0.1111111111111], 0.43067034: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.751657994: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.265585144: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.886134068: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.047036017: [0.2323232323232, 0.7676767676768, 0.6767676767677, 0.3232323232323], 0.865932048: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.245383124: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.212937455: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.17620651: [0.4444444444444, 0.5555555555556], 0.697173758: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.838587899: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.042138557: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.480257117: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.38292011: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.282726252: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.084991327: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.991021324: [0.2222222222222, 0.7777777777778], 0.019283747: [0.030303030303, 0.969696969697, 0.3636363636364, 
0.6969696969697, 0.6363636363636, 0.3030303030303], 0.796347311: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.152331395: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.634118967: [0.8888888888889, 0.1111111111111], 0.753902663: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.629221508: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.258851138: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.775533109: [0.8888888888889, 0.1111111111111], 0.29986736: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.051321294: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.863075196: [0.4444444444444, 0.5555555555556], 0.230078563: [0.8888888888889, 0.1111111111111], 0.641873278: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.935924906: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.794306703: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.070911132: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.427201306: [0.1313131313131, 0.6868686868687, 0.3131313131313, 0.8686868686869], 0.275992246: [0.3434343434343, 0.6565656565657, 0.4343434343434, 0.5656565656566], 0.029894909: [0.020202020202, 0.2020202020202, 0.979797979798, 0.7979797979798], 0.744719927: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.003571064: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.037241098: [0.7474747474747, 0.5252525252525, 0.2525252525253, 0.4747474747475], 0.455361698: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.776145291: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.515967758: [0.2828282828283, 
0.1717171717172, 0.8282828282828, 0.7171717171717], 0.075196409: [0.4444444444444, 0.5555555555556], 0.903479237: [0.4444444444444, 0.5555555555556], 0.941230487: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.293133354: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.703295582: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.07805326: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.794510764: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.551882461: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.65493317: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.472502806: [0.8888888888889, 0.1111111111111], 0.407611468: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.116212631: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.80348944: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.646362616: [0.010101010101, 0.8989898989899, 0.989898989899, 0.1010101010101], 0.41046832: [0.0606060606061, 0.9393939393939, 0.3939393939394, 0.7272727272727, 0.6060606060606, 0.2727272727273], 0.277828793: [0.9292929292929, 0.0707070707071, 0.2929292929293, 0.7070707070707], 0.883889399: [0.9292929292929, 0.2929292929293, 0.7070707070707, 0.0707070707071], 0.85144373: [0.6868686868687, 0.3131313131313, 0.8686868686869, 0.1313131313131], 0.754106724: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.656769717: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.392306907: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.944903581: [0.1212121212121, 0.4545454545455, 0.8787878787879, 0.2121212121212, 0.7878787878788, 0.5454545454545], 0.262524232: [0.010101010101, 
0.8989898989899, 0.989898989899, 0.1010101010101], 0.223344557: [0.2222222222222, 0.7777777777778], 0.885725946: [0.2525252525253, 0.5252525252525, 0.4747474747475, 0.7474747474747], 0.723497602: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.691051933: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.858177737: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.474339353: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.441893684: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.124783185: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919], 0.724721967: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.31496786: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.231915111: [0.020202020202, 0.979797979798, 0.2020202020202, 0.7979797979798], 0.199469442: [0.949494949495, 0.0505050505051, 0.9494949494949, 0.4949494949495, 0.5050505050505], 0.757779818: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.728395062: [0.2222222222222, 0.7777777777778], 0.459034792: [0.4444444444444, 0.5555555555556], 0.582083461: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.704315886: [0.4848484848485, 0.8484848484848, 0.8181818181818, 0.1818181818182, 0.1515151515152, 0.5151515151515], 0.151719212: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.576573819: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.580246914: [0.4444444444444, 0.5555555555556], 0.435159678: [0.2828282828283, 0.1717171717172, 0.8282828282828, 0.7171717171717], 0.37026834: [0.3232323232323, 0.7676767676768, 0.2323232323232, 0.6767676767677], 0.900826446: [0.2424242424242, 0.9090909090909, 0.4242424242424, 0.7575757575758, 0.0909090909091, 0.5757575757576], 0.039485767: [0.030303030303, 
0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.272931334: [0.1414141414141, 0.8585858585859, 0.4141414141414, 0.5858585858586], 0.706152433: [0.969696969697, 0.030303030303, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.544536272: [0.030303030303, 0.969696969697, 0.3636363636364, 0.6969696969697, 0.6363636363636, 0.3030303030303], 0.809203143: [0.2222222222222, 0.7777777777778], 0.50127538: [0.4646464646465, 0.5353535353535, 0.3535353535354, 0.6464646464646], 0.21661055: [0.4444444444444, 0.5555555555556], 0.61452913: [0.040404040404, 0.4040404040404, 0.959595959596, 0.5959595959596], 0.549637792: [0.1616161616162, 0.3838383838384, 0.6161616161616, 0.8383838383838], 0.082542598: [0.2626262626263, 0.7373737373737, 0.3737373737374, 0.6262626262626], 0.575757576: [0.0, 0.3333333333333, 0.6666666666667], 0.387409448: [0.0808080808081, 0.8080808080808, 0.1919191919192, 0.9191919191919]}
| 49,898
| 102,227
| 0.791903
| 31,448
| 249,490
| 6.282339
| 0.07886
| 0.002065
| 0.001564
| 0.014031
| 0.819221
| 0.773302
| 0.773302
| 0.755384
| 0.755101
| 0.356861
| 0
| 0.844892
| 0.063001
| 249,490
| 5
| 102,228
| 49,898
| 0.000235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
841439fb5785ae61dd41d908de54c3f89a04589b
| 3,013
|
py
|
Python
|
oss_server/explorer/v1/forms.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 10
|
2018-01-30T06:21:43.000Z
|
2022-01-03T12:18:07.000Z
|
oss_server/explorer/v1/forms.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 4
|
2018-02-21T02:30:37.000Z
|
2018-03-04T05:20:30.000Z
|
oss_server/explorer/v1/forms.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 2
|
2018-01-31T04:22:52.000Z
|
2018-03-10T14:04:35.000Z
|
from django import forms
class GetAddressTxsForm(forms.Form):
    """Validates query parameters of the address-transactions listing endpoint.

    All fields are optional; `starting_after` is a 64-character cursor
    (transaction hash), the remaining fields are non-negative integers.
    """

    starting_after = forms.CharField(
        required=False,
        min_length=64,
        max_length=64,
        error_messages={
            'invalid': '`starting_after` is invalid',
            'min_length': 'length of `starting_after` should be exactly 64',
            'max_length': 'length of `starting_after` should be exactly 64',
        },
    )
    since = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`since` is invalid',
            'min_value': '`since` should be greater than or equal to %(limit_value)s',
        },
    )
    until = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`until` is invalid',
            'min_value': '`until` should be greater than or equal to %(limit_value)s',
        },
    )
    page_size = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`page_size` is invalid',
            'min_value': '`page_size` should be greater than or equal to %(limit_value)s',
        },
    )
class GetBlocksForm(forms.Form):
    """Validates query parameters of the blocks listing endpoint.

    All fields are optional; `starting_after` is a 64-character cursor
    (block hash), the remaining fields are non-negative integers.
    """

    starting_after = forms.CharField(
        required=False,
        min_length=64,
        max_length=64,
        error_messages={
            'invalid': '`starting_after` is invalid',
            'min_length': 'length of `starting_after` should be exactly 64',
            'max_length': 'length of `starting_after` should be exactly 64',
        },
    )
    since = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`since` is invalid',
            'min_value': '`since` should be greater than or equal to %(limit_value)s',
        },
    )
    until = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`until` is invalid',
            'min_value': '`until` should be greater than or equal to %(limit_value)s',
        },
    )
    page_size = forms.IntegerField(
        required=False,
        min_value=0,
        error_messages={
            'invalid': '`page_size` is invalid',
            'min_value': '`page_size` should be greater than or equal to %(limit_value)s',
        },
    )
| 57.942308
| 116
| 0.438433
| 258
| 3,013
| 4.934109
| 0.151163
| 0.075412
| 0.10055
| 0.141398
| 0.952082
| 0.952082
| 0.952082
| 0.952082
| 0.952082
| 0.952082
| 0
| 0.013968
| 0.477265
| 3,013
| 51
| 117
| 59.078431
| 0.794286
| 0
| 0
| 0.933333
| 0
| 0
| 0.286948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022222
| 0
| 0.244444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
841873dc2a9c0ad53c81c81d30bb8d5f14c94294
| 92
|
py
|
Python
|
tests/emdrp/emdrp/test_dpMergeProbs.py
|
erjel/emdrp
|
0b04a164989dd2f8ab8d1defc38353a6c0c11c8c
|
[
"MIT"
] | 4
|
2020-01-14T14:41:14.000Z
|
2022-01-08T11:12:27.000Z
|
tests/emdrp/emdrp/test_dpMergeProbs.py
|
erjel/emdrp
|
0b04a164989dd2f8ab8d1defc38353a6c0c11c8c
|
[
"MIT"
] | 1
|
2021-09-23T19:59:08.000Z
|
2021-09-23T19:59:08.000Z
|
tests/emdrp/emdrp/test_dpMergeProbs.py
|
erjel/emdrp
|
0b04a164989dd2f8ab8d1defc38353a6c0c11c8c
|
[
"MIT"
] | 1
|
2021-03-02T15:25:48.000Z
|
2021-03-02T15:25:48.000Z
|
import pytest
def test_imports():
    """Smoke test: emdrp.dpMergeProbs and its dpMergeProbs symbol import cleanly."""
    # The import itself is the assertion -- an ImportError fails the test.
    # The trailing `pass` in the original was dead code and has been removed.
    from emdrp.dpMergeProbs import dpMergeProbs  # noqa: F401
| 18.4
| 47
| 0.76087
| 11
| 92
| 6.272727
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195652
| 92
| 5
| 48
| 18.4
| 0.932432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.75
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
84443b26edba20dbf4d7eb25a546aa7b4aabe246
| 14,871
|
py
|
Python
|
tests/behave/features/steps/evolsuperoperator_dephasing.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 14
|
2016-10-16T13:26:05.000Z
|
2021-11-09T11:40:52.000Z
|
tests/behave/features/steps/evolsuperoperator_dephasing.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 61
|
2016-09-19T10:45:56.000Z
|
2021-11-10T13:53:06.000Z
|
tests/behave/features/steps/evolsuperoperator_dephasing.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 21
|
2016-08-30T09:09:28.000Z
|
2022-03-30T03:16:35.000Z
|
"""
Autogenerated by ghenerate script, part of Quantarhei
http://github.com/tmancal74/quantarhei
Tomas Mancal, tmancal74@gmai.com
Generated on: 2018-11-07 09:41:47
Edit the functions below to give them desired functionality.
In present version of `ghenerate`, no edits or replacements
are perfomed in the feature file text.
"""
from behave import given
from behave import when
from behave import then
import numpy
import quantarhei as qr
#
# Given ...
#
@given('I have PureDephasing object D with dephasing time {dtime} and initial density matrix R')
def step_given_1(context, dtime):
    """
    Given I have PureDephasing object D with dephasing time {dtime} and initial density matrix R

    Builds a three-site test aggregate, stores its Hamiltonian, an initial
    reduced density matrix with 0.5 on all elements of the 1..3 block (set in
    the eigenbasis of H), and a PureDephasing object with rate 1/td on all
    one-exciton coherence elements.
    """
    td = float(dtime)
    print("Dephasing time", td)
    context.td = td

    # create test aggregate (three-site "trimer-2")
    agg = qr.TestAggregate("trimer-2")
    with qr.energy_units("1/cm"):
        agg.set_resonance_coupling(0,1,100.0)
        agg.set_resonance_coupling(1,2,50.0)
    # NOTE(review): indentation reconstructed from a flattened dump -- build()
    # assumed outside the energy_units context; confirm against upstream file.
    agg.build()

    HH = agg.get_Hamiltonian()
    context.H = HH
    print("Hamiltonian:")
    print(HH)

    # initial density matrix; the coherence block is set in the eigenbasis of H
    R = qr.ReducedDensityMatrix(dim=HH.dim)
    with qr.eigenbasis_of(HH):
        R.data[1:4,1:4] = 0.5
    context.R = R

    # dephasing rate 1/td on every off-diagonal element of the 1..3 block
    dd = numpy.zeros((4,4), dtype=qr.REAL)
    dd[1,2] = 1.0/td
    dd[2,1] = 1.0/td
    dd[1,3] = 1.0/td
    dd[3,1] = 1.0/td
    dd[2,3] = 1.0/td
    dd[3,2] = 1.0/td

    D = qr.qm.PureDephasing(drates=dd)
    context.D = D
#
# When ...
#
@when('I calculate EvolutionSuperOperator using only PureDephasing D with {time_step} and {N_dense}')
def step_when_2(context, time_step, N_dense):
    """
    When I calculate EvolutionSuperOperator using only PureDephasing D with {time_step} and {N_dense}
    """
    # get the associated time axis and the relaxation tensor and Hamiltonian
    dt = float(time_step)
    N_dense = int(N_dense)

    time = qr.TimeAxis(0, 1320, 1.0)   # fine reference axis (unused below, kept on context)
    time2 = qr.TimeAxis(0, 132, dt)    # coarse axis on which U is stored
    context.time = time
    context.time2 = time2

    HH = context.H
    DD = context.D
    print("Dephasing type: ", DD.dtype)

    # This tests if it is possible to ignore relaxation tensor in defining
    # evolution superoperator (LindbladForm built with sbi=None)
    L = qr.qm.LindbladForm(HH, sbi=None)

    U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L, pdeph=DD)
    U.set_dense_dt(N_dense)  # N_dense internal propagation steps per coarse step

    # calculation is performed in the eigenbasis of H
    with qr.eigenbasis_of(HH):
        U.calculate()

    context.U = U
#
# And ...
#
@when('I apply the EvolutionSuperOperator to R to get RD at time {t_prop}')
def step_when_3(context, t_prop):
    """
    And I apply the EvolutionSuperOperator to R to get RD at time {t_prop}
    """
    U = context.U
    R = context.R

    t = float(t_prop)  # time at which the superoperator is evaluated
    RD = U.apply(t, R)

    context.RD = RD
#
# And ...
#
@when('I multiply each coherence element by corresponding exponential decay with dephasing time {dtime} to get RE at time {t_prop}')
def step_when_4(context, dtime, t_prop):
    """
    And I multiply each coherence element by corresponding exponential decay with dephasing time {dtime} to get RE

    Builds the analytic reference: each element evolves with phase
    exp(-1j*om*t); off-diagonal elements additionally decay as exp(-gamma*t).
    """
    gamma = 1.0/float(dtime)  # Lorentzian (exponential) dephasing rate
    t = float(t_prop)

    R = context.R
    HH = context.H

    RE = qr.ReducedDensityMatrix(dim=HH.dim)
    RE.data[:,:] = 0.0
    # element-by-element construction must happen in the eigenbasis of H
    with qr.eigenbasis_of(HH):
        for i in range(4):
            for j in range(4):
                om = HH.data[i,i]-HH.data[j,j]  # transition frequency
                if i != j:
                    # coherences oscillate and decay exponentially
                    RE.data[i,j] = R.data[i,j]*numpy.exp(-1j*om*t -gamma*t)
                else:
                    # no dephasing on the diagonal (om is zero for i == j)
                    RE.data[i,j] = R.data[i,j]*numpy.exp(-1j*om*t)

    context.RE = RE
#
# Then ...
#
@then('RD equals RE at times {t_prop}')
def step_then_5(context, t_prop):
    """
    Then RD equals RE at times {t_prop}

    Compares the superoperator result RD against the analytic reference RE
    elementwise with rtol = atol = 1e-5.
    """
    RD = context.RD
    RE = context.RE

    # comparison (and diagnostic printout) in the eigenbasis of H
    with qr.eigenbasis_of(context.H):
        print(context.R)
        print("\nCalculated with U:")
        print("Shape: ", RD.data.shape)
        print(RD.data)
        print("\nReference calculation:")
        print("Shape: ", RE.data.shape)
        print(RE.data)
        print("\nMax diff:", numpy.amax(numpy.abs(RD.data - RE.data)))

        numpy.testing.assert_allclose(RD.data, RE.data, rtol=1.0e-5,
                                      atol=1.0e-5)
#
# Given ...
#
@given('I have PureDephasing object D with dephasing constant {dtime} and initial density matrix R')
def step_given_6(context, dtime):
    """
    Given I have PureDephasing object D with dephasing constant {dtime} and initial density matrix R

    Same setup as the "dephasing time" Given step, but the rates are
    (1/td)**2 and the PureDephasing object is of Gaussian type.
    """
    td = float(dtime)
    print("Dephasing time", td)
    context.td = td

    # create test aggregate (three-site "trimer-2")
    agg = qr.TestAggregate("trimer-2")
    with qr.energy_units("1/cm"):
        agg.set_resonance_coupling(0,1,100.0)
        agg.set_resonance_coupling(1,2,50.0)
    # NOTE(review): indentation reconstructed from a flattened dump -- build()
    # assumed outside the energy_units context; confirm against upstream file.
    agg.build()

    HH = agg.get_Hamiltonian()
    context.H = HH
    print("Hamiltonian:")
    print(HH)

    # initial density matrix; the coherence block is set in the eigenbasis of H
    R = qr.ReducedDensityMatrix(dim=HH.dim)
    with qr.eigenbasis_of(HH):
        R.data[1:4,1:4] = 0.5
    context.R = R

    # Gaussian dephasing: rates enter squared, (1/td)**2
    dd = numpy.zeros((4,4), dtype=qr.REAL)
    dd[1,2] = (1.0/td)**2
    dd[2,1] = (1.0/td)**2
    dd[1,3] = (1.0/td)**2
    dd[3,1] = (1.0/td)**2
    dd[2,3] = (1.0/td)**2
    dd[3,2] = (1.0/td)**2

    D = qr.qm.PureDephasing(drates=dd, dtype="Gaussian")
    context.D = D
#
# And ...
#
@when('I multiply each coherence element by corresponding Gaussian decay with dephasing time {dtime} to get RE at time {t_prop}')
def step_when_7(context, dtime, t_prop):
    """
    And I multiply each coherence element by corresponding Gaussian decay with dephasing time {dtime} to get RE at time {t_prop}

    Analytic reference for Gaussian pure dephasing: off-diagonal elements
    decay as exp(-(delta/2)*t**2) with delta = (1/td)**2.
    """
    delta = (1.0/float(dtime))**2  # Gaussian dephasing parameter
    t = float(t_prop)

    R = context.R
    HH = context.H

    RE = qr.ReducedDensityMatrix(dim=HH.dim)
    RE.data[:,:] = 0.0
    # element-by-element construction must happen in the eigenbasis of H
    with qr.eigenbasis_of(HH):
        for i in range(4):
            for j in range(4):
                om = HH.data[i,i]-HH.data[j,j]  # transition frequency
                if i != j:
                    # coherences oscillate and decay with a Gaussian envelope
                    RE.data[i,j] = R.data[i,j]*numpy.exp(-1j*om*t -(delta/2.0)*(t**2))
                else:
                    # no dephasing on the diagonal (om is zero for i == j)
                    RE.data[i,j] = R.data[i,j]*numpy.exp(-1j*om*t)

    context.RE = RE
#
# When ...
#
@when('I calculate EvolutionSuperOperator step by step using only PureDephasing D with {time_step} and {N_dense}')
def step_when_8(context, time_step, N_dense):
    """
    When I calculate EvolutionSuperOperator step by step using only PureDephasing D with {time_step} and {N_dense}

    Computes the superoperator one coarse step at a time ("jit" mode) and
    copies each step's data into an ordinary superoperator U1, which is then
    stored on the context.
    """
    # get the associated time axis and the relaxation tensor and Hamiltonian
    dt = float(time_step)
    N_dense = int(N_dense)

    Ntot = 1320
    Nsteps = int(Ntot/N_dense)  # number of coarse steps covering Ntot fs

    time = qr.TimeAxis(0, Ntot, 1.0)
    time2 = qr.TimeAxis(0, Nsteps, dt)
    context.time = time
    context.time2 = time2

    HH = context.H
    DD = context.D
    print("Dephasing type: ", DD.dtype)

    # This tests if it is possible to ignore relaxation tensor in defining
    # evolution superoperator (LindbladForm built with sbi=None)
    L = qr.qm.LindbladForm(HH, sbi=None)

    # "jit" mode holds only the current step; U1 accumulates all steps
    U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                     pdeph=DD, mode="jit")
    U.set_dense_dt(N_dense)

    U1 = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                      pdeph=DD)

    with qr.eigenbasis_of(HH):
        for i in range(1, Nsteps):
            U.calculate_next()
            # copy the just-computed step into the full superoperator
            U1.data[i,:,:,:,:] = U.data[:,:,:,:]

    context.U = U1
#
# Given ...
#
@given('I have a Hamiltonian H, Lidblad form L, PureDephasing object D with dephasing constant {dtime} and initial density matrix R')
def step_given_9(context, dtime):
    """
    Given I have a Hamiltonian H, Lindblad form L, PureDephasing object D with dephasing constant {dtime} and initial density matrix R

    Like the other Given steps, but also stores an (empty, sbi=None)
    Lindblad form on the context.  ("Lidblad" in the step text is a typo
    that must match the feature file, so it is kept there.)
    """
    td = float(dtime)
    print("Dephasing time", td)
    context.td = td

    # create test aggregate (three-site "trimer-2")
    agg = qr.TestAggregate("trimer-2")
    with qr.energy_units("1/cm"):
        agg.set_resonance_coupling(0,1,100.0)
        agg.set_resonance_coupling(1,2,50.0)
    # NOTE(review): indentation reconstructed from a flattened dump -- build()
    # assumed outside the energy_units context; confirm against upstream file.
    agg.build()

    HH = agg.get_Hamiltonian()
    context.H = HH
    print("Hamiltonian:")
    print(HH)

    L = qr.qm.LindbladForm(HH, sbi=None)
    context.L = L

    # initial density matrix; the coherence block is set in the eigenbasis of H
    R = qr.ReducedDensityMatrix(dim=HH.dim)
    with qr.eigenbasis_of(HH):
        R.data[1:4,1:4] = 0.5
    context.R = R

    # NOTE(review): rates here are 1/td (not squared) although dtype is
    # "Gaussian", unlike step_given_6 which uses (1/td)**2 -- confirm intended.
    dd = numpy.zeros((4,4), dtype=qr.REAL)
    dd[1,2] = 1.0/td
    dd[2,1] = 1.0/td
    dd[1,3] = 1.0/td
    dd[3,1] = 1.0/td
    dd[2,3] = 1.0/td
    dd[3,2] = 1.0/td

    D = qr.qm.PureDephasing(drates=dd, dtype="Gaussian")
    context.D = D
#
# When ...
#
@when('I calculate EvolutionSuperOperator step by step using PureDephasing D and Lindblad form L with {time_step} and {N_dense}')
def step_when_10(context, time_step, N_dense):
    """
    When I calculate EvolutionSuperOperator step by step using PureDephasing D and Lindblad form L with {time_step} and {N_dense}
    """
    HH = context.H
    L = context.L
    DD = context.D

    dt = float(time_step)
    N_dense = int(N_dense)

    Ntot = 1320
    Nsteps = int(Ntot/N_dense)  # number of coarse steps covering Ntot fs

    time2 = qr.TimeAxis(0, Nsteps, dt)
    context.time2 = time2

    # mode is hard-wired to "jit"; the "all" branch below is dead code kept
    # for manual switching during development
    mode = "jit"
    print("Pure dephasing: ", DD.dtype)
    if mode == "jit":
        U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                         pdeph=DD, mode="jit")
        U.set_dense_dt(N_dense)

        U1 = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                          pdeph=DD)

        with qr.eigenbasis_of(HH):
            for i in range(1, Nsteps):
                U.calculate_next()
                # copy the just-computed step into the full superoperator
                U1.data[i,:,:,:,:] = U.data[:,:,:,:]

        context.U = U1

    elif mode == "all":
        U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                         pdeph=DD, mode="all")
        U.set_dense_dt(N_dense)

        with qr.eigenbasis_of(HH):
            U.calculate()

        context.U = U
#
# And ...
#
@when('I propagate with Lindblad form L and PureDephasing D to get RE at time {t_prop}')
def step_when_11(context, t_prop):
    """
    And I propagate with Lindblad form L and PureDephasing D to get RE at time {t_prop}

    Reference calculation by direct density-matrix propagation; RE is the
    propagated state at the coarse-axis index closest to t_prop.
    """
    t = float(t_prop)

    # initial density matrix
    R = context.R
    HH = context.H
    # RE is pre-allocated here but rebound below from the propagation result
    RE = qr.ReducedDensityMatrix(dim=HH.dim)
    RE.data[:,:] = 0.0

    time2 = context.time2
    t2, dt2 = time2.locate(t)  # index of t on the coarse time axis

    L = context.L
    D = context.D

    prop = qr.ReducedDensityMatrixPropagator(timeaxis=time2,
                                             Ham=HH, RTensor=L, PDeph=D)
    prop.setDtRefinement(10)  # 10 internal sub-steps per axis step

    with qr.eigenbasis_of(HH):
        rhot = prop.propagate(R)
        RE = qr.ReducedDensityMatrix(data=rhot.data[t2,:,:])

    context.RE = RE
#
# When ...
#
@when('I calculate EvolutionSuperOperator in one shot using only PureDephasing D with {time_step} and {N_dense}')
def step_when_12(context, time_step, N_dense):
    """
    When I calculate EvolutionSuperOperator in one shot using only PureDephasing D with {time_step} and {N_dense}
    """
    HH = context.H
    # relaxation is ignored on purpose: empty Lindblad form (sbi=None)
    L = qr.qm.LindbladForm(HH, sbi=None)
    DD = context.D

    dt = float(time_step)
    N_dense = int(N_dense)

    Ntot = 1320
    Nsteps = int(Ntot/N_dense)  # number of coarse steps covering Ntot fs

    time2 = qr.TimeAxis(0, Nsteps, dt)
    context.time2 = time2

    # mode is hard-wired to "all"; the "jit" branch below is dead code kept
    # for manual switching during development
    mode = "all"

    U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                     pdeph=DD, mode=mode)
    U.set_dense_dt(N_dense)

    if mode == "all":
        with qr.eigenbasis_of(HH):
            U.calculate()

        context.U = U

    elif mode == "jit":
        U1 = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                          pdeph=DD)

        with qr.eigenbasis_of(HH):
            for i in range(1, Nsteps):
                U.calculate_next()
                # copy the just-computed step into the full superoperator
                U1.data[i,:,:,:,:] = U.data[:,:,:,:]

        context.U = U1
#
# When ...
#
@when('I calculate EvolutionSuperOperator in one shot using PureDephasing D and Lindblad form L with {time_step} and {N_dense}')
def step_when_13(context, time_step, N_dense):
    """
    When I calculate EvolutionSuperOperator in one shot using PureDephasing D and Lindblad form L with {time_step} and {N_dense}

    Same as the "only PureDephasing" one-shot step, but uses the Lindblad
    form previously stored on the context instead of an empty one.
    """
    HH = context.H
    L = context.L
    DD = context.D

    dt = float(time_step)
    N_dense = int(N_dense)

    Ntot = 1320
    Nsteps = int(Ntot/N_dense)  # number of coarse steps covering Ntot fs

    time2 = qr.TimeAxis(0, Nsteps, dt)
    context.time2 = time2

    # mode is hard-wired to "all"; the "jit" branch below is dead code kept
    # for manual switching during development
    mode = "all"

    U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                     pdeph=DD, mode=mode)
    U.set_dense_dt(N_dense)

    if mode == "all":
        with qr.eigenbasis_of(HH):
            U.calculate()

        context.U = U

    elif mode == "jit":
        U1 = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=L,
                                          pdeph=DD)

        with qr.eigenbasis_of(HH):
            for i in range(1, Nsteps):
                U.calculate_next()
                # copy the just-computed step into the full superoperator
                U1.data[i,:,:,:,:] = U.data[:,:,:,:]

        context.U = U1
#
# Given ...
#
@given('I have a Hamiltonian H, Lidblad form L, PureDephasing object D with dephasing time {dtime} and initial density matrix R')
def step_given_14(context, dtime):
    """
    Given I have a Hamiltonian H, Lidblad form L, PureDephasing object D with dephasing time {dtime} and initial density matrix R
    """
    dephasing_time = float(dtime)
    print("Dephasing time", dephasing_time)
    context.td = dephasing_time

    # test aggregate: a trimer with explicit resonance couplings
    aggregate = qr.TestAggregate("trimer-2")
    with qr.energy_units("1/cm"):
        aggregate.set_resonance_coupling(0, 1, 100.0)
        aggregate.set_resonance_coupling(1, 2, 50.0)
    aggregate.build()

    hamiltonian = aggregate.get_Hamiltonian()
    context.H = hamiltonian
    print("Hamiltonian:")
    print(hamiltonian)

    # Lindblad form without a system-bath interaction
    context.L = qr.qm.LindbladForm(hamiltonian, sbi=None)

    # initial density matrix: uniform block over the excited-state levels,
    # set in the eigenbasis of the Hamiltonian
    rho0 = qr.ReducedDensityMatrix(dim=hamiltonian.dim)
    with qr.eigenbasis_of(hamiltonian):
        rho0.data[1:4, 1:4] = 0.5
    context.R = rho0

    # symmetric matrix of pure-dephasing rates between the excited states
    rates = numpy.zeros((4, 4), dtype=qr.REAL)
    for a, b in ((1, 2), (1, 3), (2, 3)):
        rates[a, b] = 1.0/dephasing_time
        rates[b, a] = 1.0/dephasing_time

    context.D = qr.qm.PureDephasing(drates=rates, dtype="Lorentzian")
| 24.867893
| 137
| 0.569498
| 2,111
| 14,871
| 3.93747
| 0.101374
| 0.025265
| 0.01155
| 0.034649
| 0.871511
| 0.865375
| 0.856112
| 0.845645
| 0.836501
| 0.819418
| 0
| 0.03242
| 0.305158
| 14,871
| 598
| 138
| 24.867893
| 0.771993
| 0.163136
| 0
| 0.753943
| 1
| 0.018927
| 0.141566
| 0.010947
| 0
| 0
| 0
| 0
| 0.003155
| 1
| 0.044164
| false
| 0
| 0.015773
| 0
| 0.059937
| 0.072555
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ffbedb70cf4ab4b230e5f1bf244ea7599097ce11
| 2,467
|
py
|
Python
|
App(BE)/main/api/book.py
|
osamhack2021/AI_APP_handylib_devlib
|
62cf67e6df280217e3715e2aa425636cefa7dd6f
|
[
"MIT"
] | 1
|
2021-12-16T10:41:16.000Z
|
2021-12-16T10:41:16.000Z
|
App(BE)/main/api/book.py
|
osamhack2021/AI_APP_handylib_devlib
|
62cf67e6df280217e3715e2aa425636cefa7dd6f
|
[
"MIT"
] | 6
|
2021-10-11T06:03:48.000Z
|
2021-10-17T09:42:05.000Z
|
App(BE)/main/api/book.py
|
osamhack2021/AI_APP_handylib_devlib
|
62cf67e6df280217e3715e2aa425636cefa7dd6f
|
[
"MIT"
] | null | null | null |
#book blueprint
from flask import Blueprint, json, Response
from main.models import database
book_page=Blueprint('book',__name__)
@book_page.route('/search/title=<title>&page=<page>',methods=['GET'])
def book_search(title, page):
    """Return up to 5 books whose title contains *title* (1-based *page*).

    Responds with JSON of the form {"list": [...]} and HTTP 200.
    """
    import re  # local import: used to escape user input below

    # pages are 1-based in the URL; clamp anything below 1 to the first page
    page = max(int(page) - 1, 0)

    # SECURITY/CORRECTNESS: re.escape prevents regex metacharacters in the
    # user-supplied search term (e.g. a title like "C++" or "(") from
    # crashing the $regex query or changing its meaning
    cursor = database.client.API_test.book.find(
        {'title': {'$regex': re.escape(title)}}
    ).skip(page * 5).limit(5)

    ajson = []
    for doc in cursor:
        del doc['_id']  # ObjectId is not JSON-serializable
        ajson.append(doc)

    result = {'list': ajson}
    resultJson = json.dumps(result, ensure_ascii=False)
    return Response(resultJson, mimetype="application/json", status=200)
@book_page.route('/search/categoryId=<categoryId>&page=<page>',methods=['GET'])
def book_search_categoryId(categoryId, page):
    """Return up to 5 books in category *categoryId* (1-based *page*).

    Responds with JSON of the form {"list": [...]} and HTTP 200.
    """
    categoryId = int(categoryId)
    page = int(page)

    # pages are 1-based in the URL; anything below 1 maps to the first page
    if page <= 1:
        page = 0
    else:
        page = page - 1

    docs = list(database.client.API_test.book.find(
        {'categoryId': categoryId}).skip(page * 5).limit(5))

    ajson = []
    for doc in docs:
        del doc['_id']  # ObjectId is not JSON-serializable
        ajson.append(doc)

    result = {'list': ajson}
    resultJson = json.dumps(result, ensure_ascii=False)
    return Response(resultJson, mimetype="application/json", status=200)
@book_page.route('/feed/category/cat=<categoryId>&page=<page>',methods=['GET'])
def book_search_feed_categoryId(categoryId, page):
    """Feed endpoint: up to 5 books in category *categoryId* (1-based *page*).

    Responds with JSON of the form {"list": [...]} and HTTP 200.
    """
    categoryId = int(categoryId)

    # pages are 1-based in the URL; anything below 1 maps to the first page
    page = int(page)
    page = page - 1 if page > 1 else 0

    query = {'categoryId': categoryId}
    found = list(database.client.API_test.book.find(query)
                 .skip(page * 5).limit(5))

    ajson = []
    for entry in found:
        del entry['_id']  # ObjectId is not JSON-serializable
        ajson.append(entry)

    resultJson = json.dumps({'list': ajson}, ensure_ascii=False)
    return Response(resultJson, mimetype="application/json", status=200)
@book_page.route('/search/isbn=<isbn>&page=<page>',methods=['GET'])
def book_search_isbn(isbn, page):
    """Return up to 5 books whose ISBN contains *isbn* (1-based *page*).

    Responds with JSON of the form {"list": [...]} and HTTP 200.
    """
    import re  # local import: used to escape user input below

    # pages are 1-based in the URL; clamp anything below 1 to the first page
    page = max(int(page) - 1, 0)

    # SECURITY/CORRECTNESS: re.escape prevents regex metacharacters in the
    # user-supplied term from crashing the $regex query or changing its meaning
    cursor = database.client.API_test.book.find(
        {'isbn': {'$regex': re.escape(isbn)}}
    ).skip(page * 5).limit(5)

    ajson = []
    for doc in cursor:
        del doc['_id']  # ObjectId is not JSON-serializable
        ajson.append(doc)

    result = {'list': ajson}
    resultJson = json.dumps(result, ensure_ascii=False)
    return Response(resultJson, mimetype="application/json", status=200)
| 34.746479
| 118
| 0.622213
| 333
| 2,467
| 4.522523
| 0.177177
| 0.053121
| 0.034529
| 0.047809
| 0.850598
| 0.837317
| 0.837317
| 0.796149
| 0.719124
| 0.702523
| 0
| 0.016244
| 0.201459
| 2,467
| 71
| 119
| 34.746479
| 0.748223
| 0.005675
| 0
| 0.8
| 0
| 0
| 0.123522
| 0.06115
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.030769
| 0
| 0.153846
| 0.030769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fff29ea77e9ccffe829cdb0c4e15323e0a082639
| 12,948
|
py
|
Python
|
prev_ob_models/KaplanLansner2014/MergeSpikefiles.py
|
fameshpatel/olfactorybulb
|
8d7a644b4560309ef177c0590ff73ed4c2432604
|
[
"MIT"
] | 5
|
2019-10-03T14:49:02.000Z
|
2022-01-13T13:37:34.000Z
|
prev_ob_models/KaplanLansner2014/MergeSpikefiles.py
|
fameshpatel/olfactorybulb
|
8d7a644b4560309ef177c0590ff73ed4c2432604
|
[
"MIT"
] | 4
|
2019-12-30T15:57:24.000Z
|
2020-10-07T22:42:50.000Z
|
prev_ob_models/KaplanLansner2014/MergeSpikefiles.py
|
fameshpatel/olfactorybulb
|
8d7a644b4560309ef177c0590ff73ed4c2432604
|
[
"MIT"
] | 2
|
2020-05-19T20:12:48.000Z
|
2020-11-04T17:17:44.000Z
|
import os
import numpy
import sys
import simulation_parameters
class MergeSpikefiles(object):
def __init__(self, params):
self.params = params
def merge_nspike_files(self, merge_pattern, sorted_pattern_output, pattern='', sort_idx=1):
if (pattern == ''): # merge for all patterns
for pattern in xrange(self.params['n_patterns']):
rnd_nr1 = numpy.random.randint(0,10**8)
rnd_nr2 = rnd_nr1 + 1
fn_out = sorted_pattern_output + str(pattern) + ".dat"
print 'output_file:', fn_out
# merge files from different processors
tmp_file = "tmp_%d" % (rnd_nr2)
os.system("cat %s%d_* > %s" % (merge_pattern, pattern, tmp_file))
# sort according to cell id
os.system("sort -gk %d %s > %s" % (sort_idx, tmp_file, fn_out))
os.system("rm %s" % (tmp_file))
else:
rnd_nr1 = numpy.random.randint(0,10**8)
rnd_nr2 = rnd_nr1 + 1
fn_out = sorted_pattern_output + str(pattern) + ".dat"
print 'output_file:', fn_out
# merge files from different processors
tmp_file = "tmp_%d" % (rnd_nr2)
os.system("cat %s%d_* > %s" % (merge_pattern, pattern, tmp_file))
# sort according to cell id
os.system("sort -gk %d %s > %s" % (sort_idx, tmp_file, fn_out))
os.system("rm %s" % (tmp_file))
def merge_spiketimes_files(self, merge_pattern, sorted_pattern_output, pattern='', sort_idx=1):
if (pattern == ''): # merge for all patterns
for pattern in xrange(self.params['n_patterns']):
rnd_nr1 = numpy.random.randint(0,10**8)
rnd_nr2 = numpy.random.randint(0,10**8) + 1
fn_out = sorted_pattern_output + str(pattern) + ".dat"
print 'output_file:', fn_out
# merge files from different processors
tmp_file = "tmp_%d" % (rnd_nr2)
os.system("cat %s%d_* > %s" % (merge_pattern, pattern, tmp_file))
# sort according to cell id
os.system("sort -gk %d %s > %s" % (sort_idx, tmp_file, fn_out))
os.system("rm %s" % (tmp_file))
else:
rnd_nr1 = numpy.random.randint(0,10**8)
rnd_nr2 = numpy.random.randint(0,10**8) + 1
fn_out = sorted_pattern_output + str(pattern) + ".dat"
print 'output_file:', fn_out
# merge files from different processors
tmp_file = "tmp_%d" % (rnd_nr2)
os.system("cat %s%d_* > %s" % (merge_pattern, pattern, tmp_file))
# sort according to cell id
os.system("sort -gk %d %s > %s" % (sort_idx, tmp_file, fn_out))
os.system("rm %s" % (tmp_file))
def merge_epth_spiketimes_file(self, pattern=''):
merge_pattern = self.params["orn_spiketimes_fn_base"]
sorted_pattern_output = self.params["orn_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
def merge_epth_nspike_files(self, pattern=''):
merge_pattern = self.params["orn_spike_fn_base"]
sorted_pattern_output = self.params["orn_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
def merge_ob_spiketimes_file(self, pattern='', sort_idx=1):
merge_pattern = self.params["mit_spiketimes_fn_base"]
sorted_pattern_output = self.params["mit_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["gran_spiketimes_fn_base"]
sorted_pattern_output = self.params["gran_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["pg_spiketimes_fn_base"]
sorted_pattern_output = self.params["pg_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
if (pattern==''):
# create spiketimes file from the other three
for pn in xrange(self.params['n_patterns']):
mit_fn = self.params['mit_spiketimes_merged_fn_base'] + str(pn) + '.dat'
gran_fn = self.params['gran_spiketimes_merged_fn_base'] + str(pn) + '.dat'
pg_fn = self.params['pg_spiketimes_merged_fn_base'] + str(pn) + '.dat'
ob_fn = self.params['ob_spikes_merged_fn_base'] + str(pn) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (mit_fn, pg_fn, gran_fn, tmp_fn))
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, ob_fn))
os.system('rm %s' % tmp_fn)
def merge_ob_nspike_files(self, pattern='', sort_idx=1):
merge_pattern = self.params["mit_spike_fn_base"]
sorted_pattern_output = self.params["mit_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["gran_spike_fn_base"]
sorted_pattern_output = self.params["gran_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["pg_spike_fn_base"]
sorted_pattern_output = self.params["pg_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
if (pattern==''):
# create spiketimes file from the other three
for pn in xrange(self.params['n_patterns']):
mit_fn = self.params['mit_spikes_merged_fn_base'] + str(pn) + '.dat'
gran_fn = self.params['gran_spikes_merged_fn_base'] + str(pn) + '.dat'
pg_fn = self.params['pg_spikes_merged_fn_base'] + str(pn) + '.dat'
ob_fn = self.params['ob_spikes_merged_fn_base'] + str(pn) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (mit_fn, pg_fn, gran_fn, tmp_fn))
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, ob_fn))
os.system('rm %s' % tmp_fn)
def merge_oc_spiketimes_files(self, pattern='', sort_idx=1):
merge_pattern = self.params["pyr_spiketimes_fn_base"]
sorted_pattern_output = self.params["pyr_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["basket_spiketimes_fn_base"]
sorted_pattern_output = self.params["basket_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["rsnp_spiketimes_fn_base"]
sorted_pattern_output = self.params["rsnp_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
if (pattern != ''):
pyr_fn = self.params['pyr_spiketimes_merged_fn_base'] + str(pattern) + '.dat'
basket_fn = self.params['basket_spiketimes_merged_fn_base'] + str(pattern) + '.dat'
rsnp_fn = self.params['rsnp_spiketimes_merged_fn_base'] + str(pattern) + '.dat'
oc_fn = self.params['oc_spiketimes_merged_fn_base'] + str(pattern) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (pyr_fn, rsnp_fn, basket_fn, tmp_fn))
# print 'cat %s %s %s > %s' % (pyr_fn, rsnp_fn, basket_fn, tmp_fn)
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, oc_fn))
# print 'sort -gk 1 %s > %s' % (tmp_fn, oc_fn)
os.system('rm %s' % tmp_fn)
else: # merge for all patterns
# create spiketimes file from the other three
for pn in xrange(self.params['n_patterns']):
pyr_fn = self.params['pyr_spiketimes_merged_fn_base'] + str(pn) + '.dat'
basket_fn = self.params['basket_spiketimes_merged_fn_base'] + str(pn) + '.dat'
rsnp_fn = self.params['rsnp_spiketimes_merged_fn_base'] + str(pn) + '.dat'
oc_fn = self.params['oc_spiketimes_merged_fn_base'] + str(pn) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (pyr_fn, rsnp_fn, basket_fn, tmp_fn))
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, oc_fn))
os.system('rm %s' % tmp_fn)
def merge_oc_nspike_files(self, pattern='', sort_idx=1):
# merge for each cell type individually
merge_pattern = self.params["pyr_spike_fn_base"]
sorted_pattern_output = self.params["pyr_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["basket_spike_fn_base"]
sorted_pattern_output = self.params["basket_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
merge_pattern = self.params["rsnp_spike_fn_base"]
sorted_pattern_output = self.params["rsnp_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
# merge output of all cell_types into one file
if (pattern != ''):
pyr_fn = self.params['pyr_spikes_merged_fn_base'] + str(pattern) + '.dat'
basket_fn = self.params['basket_spikes_merged_fn_base'] + str(pattern) + '.dat'
rsnp_fn = self.params['rsnp_spikes_merged_fn_base'] + str(pattern) + '.dat'
oc_fn = self.params['oc_spikes_merged_fn_base'] + str(pattern) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (pyr_fn, rsnp_fn, basket_fn, tmp_fn))
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, oc_fn))
os.system('rm %s' % tmp_fn)
else:
# create spiketimes file from the other three
for pn in xrange(self.params['n_patterns']):
pyr_fn = self.params['pyr_spikes_merged_fn_base'] + str(pn) + '.dat'
basket_fn = self.params['basket_spikes_merged_fn_base'] + str(pn) + '.dat'
rsnp_fn = self.params['rsnp_spikes_merged_fn_base'] + str(pn) + '.dat'
oc_fn = self.params['oc_spikes_merged_fn_base'] + str(pn) + '.dat'
rnd_nr = numpy.random.randint(0,10**8)
tmp_fn = 'tmp_%d' % (rnd_nr)
os.system('cat %s %s %s > %s' % (pyr_fn, rsnp_fn, basket_fn, tmp_fn))
os.system('sort -gk %d %s > %s' % (sort_idx, tmp_fn, oc_fn))
os.system('rm %s' % tmp_fn)
def merge_readout_spiketimes_files(self, pattern=''):
merge_pattern = self.params["readout_spiketimes_fn_base"]
sorted_pattern_output = self.params["readout_spiketimes_merged_fn_base"]
self.merge_spiketimes_files(merge_pattern, sorted_pattern_output, pattern)
def merge_readout_nspike_files(self, pattern=''):
merge_pattern = self.params["readout_spike_fn_base"]
sorted_pattern_output = self.params["readout_spikes_merged_fn_base"]
self.merge_nspike_files(merge_pattern, sorted_pattern_output, pattern)
if __name__ == '__main__':
info_txt = \
"""
Usage:
python MergeSpikeFiles.py [FOLDER] [CELLTYPE]
or
python MergeSpikeFiles.py [FOLDER] [CELLTYPE] [PATTERN_NUMBER]
"""
assert (len(sys.argv) > 2), 'ERROR: folder and cell_type not given\n' + info_txt
folder = sys.argv[1]
cell_type = sys.argv[2]
params_fn = os.path.abspath(folder) + '/Parameters/simulation_parameters.json'
param_tool = simulation_parameters.parameter_storage(params_fn=params_fn)
params = param_tool.params
try:
pn_max = int(sys.argv[3])
except:
print 'Merging all patterns'
pn_max = params['n_patterns']
if cell_type == 'all':
cell_types = params['cell_types']
else:
cell_types = [cell_type]
MS = MergeSpikefiles(params)
for cell_type in cell_types:
for pattern in xrange(pn_max):
print 'Merging nspike file for %s pattern %d' % (cell_type, pattern)
MS.merge_nspike_files(params['%s_spike_fn_base' % cell_type], params['%s_spikes_merged_fn_base' % cell_type], pattern)
print 'Merging spiketimes file for %s pattern %d' % (cell_type, pattern)
MS.merge_spiketimes_files(params['%s_spiketimes_fn_base' % cell_type], params['%s_spiketimes_merged_fn_base' % cell_type], pattern)
| 48.860377
| 143
| 0.622258
| 1,753
| 12,948
| 4.244723
| 0.068454
| 0.08601
| 0.067733
| 0.048381
| 0.882408
| 0.8687
| 0.855799
| 0.842897
| 0.718586
| 0.717511
| 0
| 0.007999
| 0.256565
| 12,948
| 264
| 144
| 49.045455
| 0.765011
| 0.055221
| 0
| 0.507853
| 0
| 0
| 0.199701
| 0.117647
| 0
| 0
| 0
| 0
| 0.005236
| 0
| null | null | 0
| 0.020942
| null | null | 0.036649
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0812a8e9416c2d29e680d0504d5275c7e523849b
| 84
|
py
|
Python
|
tests/guinea-pigs/diff_assert_repr.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 105
|
2015-06-24T15:40:41.000Z
|
2022-02-04T10:30:34.000Z
|
tests/guinea-pigs/diff_assert_repr.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 145
|
2015-06-24T15:26:28.000Z
|
2022-03-22T20:04:19.000Z
|
tests/guinea-pigs/diff_assert_repr.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 76
|
2015-07-20T08:18:21.000Z
|
2022-03-18T20:03:53.000Z
|
def test_test():
    # NOTE(review): this assertion fails on purpose (int vs str).  Judging by
    # the file's path ("tests/guinea-pigs/diff_assert_repr.py") it is a
    # fixture exercising a test reporter's failure/diff output — do not "fix".
    assert 123 == "123"
def test_test_2():
    # NOTE(review): intentionally failing assertion (list vs str) — this file
    # appears to be a reporter-output fixture; the failure is the point.
    assert [] == "[]"
| 12
| 23
| 0.52381
| 11
| 84
| 3.727273
| 0.454545
| 0.341463
| 0.536585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112903
| 0.261905
| 84
| 6
| 24
| 14
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0.059524
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
081fdc44ce4b8340b58adfbfdc53414f4dafdff6
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_annie/na_annie_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_annie/na_annie_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_annie/na_annie_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Placeholder rating classes for every champion matchup of NA Annie (Top).
# The original file spelled out ~138 identical empty `Ratings` subclasses;
# they are generated programmatically here instead.  Every public class name
# (NA_Annie_Top_<Champion>) is preserved, so `import` and attribute access
# from other modules are unaffected.
_CHAMPIONS = (
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie', 'Ashe',
    'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum', 'Caitlyn',
    'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius', 'Diana', 'Draven',
    'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal', 'Fiddlesticks', 'Fiora',
    'Fizz', 'Galio', 'Gangplank', 'Garen', 'Gnar', 'Gragas', 'Graves',
    'Hecarim', 'Heimerdinger', 'Illaoi', 'Irelia', 'Ivern', 'Janna',
    'JarvanIV', 'Jax', 'Jayce', 'Jhin', 'Jinx', 'Kalista', 'Karma', 'Karthus',
    'Kassadin', 'Katarina', 'Kayle', 'Kayn', 'Kennen', 'Khazix', 'Kindred',
    'Kled', 'KogMaw', 'Leblanc', 'LeeSin', 'Leona', 'Lissandra', 'Lucian',
    'Lulu', 'Lux', 'Malphite', 'Malzahar', 'Maokai', 'MasterYi',
    'MissFortune', 'MonkeyKing', 'Mordekaiser', 'Morgana', 'Nami', 'Nasus',
    'Nautilus', 'Nidalee', 'Nocturne', 'Nunu', 'Olaf', 'Orianna', 'Ornn',
    'Pantheon', 'Poppy', 'Quinn', 'Rakan', 'Rammus', 'RekSai', 'Renekton',
    'Rengar', 'Riven', 'Rumble', 'Ryze', 'Sejuani', 'Shaco', 'Shen',
    'Shyvana', 'Singed', 'Sion', 'Sivir', 'Skarner', 'Sona', 'Soraka',
    'Swain', 'Syndra', 'TahmKench', 'Taliyah', 'Talon', 'Taric', 'Teemo',
    'Thresh', 'Tristana', 'Trundle', 'Tryndamere', 'TwistedFate', 'Twitch',
    'Udyr', 'Urgot', 'Varus', 'Vayne', 'Veigar', 'Velkoz', 'Vi', 'Viktor',
    'Vladimir', 'Volibear', 'Warwick', 'Xayah', 'Xerath', 'XinZhao', 'Yasuo',
    'Yorick', 'Zac', 'Zed', 'Ziggs', 'Zilean', 'Zyra',
)

for _champ in _CHAMPIONS:
    _cls_name = 'NA_Annie_Top_' + _champ
    # three-argument type() creates the same empty subclass the original
    # `class NA_Annie_Top_X(Ratings): pass` statements produced
    globals()[_cls_name] = type(_cls_name, (Ratings,), {})

# keep the module namespace clean of loop temporaries
del _champ, _cls_name, _CHAMPIONS
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
084e94eb464ab401196c0a0bc6fa55ab5aae43b6
| 68,577
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_rr/cmp_perlbenchgamessbzip2calculix/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_rr/cmp_perlbenchgamessbzip2calculix/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_rr/cmp_perlbenchgamessbzip2calculix/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.252741,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.401203,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.06821,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.726949,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.25881,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.721964,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.70773,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.554789,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.08567,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.201807,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0263525,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.297246,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.194893,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.499054,
'Execution Unit/Register Files/Runtime Dynamic': 0.221245,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.78751,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.76002,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.49557,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00212992,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00212992,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00186263,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000725147,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00279965,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00892212,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0201542,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.187355,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.460187,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.636343,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.31296,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0239757,
'L2/Runtime Dynamic': 0.00602306,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.71927,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.64227,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.177361,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.177361,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.56022,
'Load Store Unit/Runtime Dynamic': 3.69432,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.437341,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.874683,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.155214,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.155485,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0757047,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.826828,
'Memory Management Unit/Runtime Dynamic': 0.231189,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.0271,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.704059,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0456443,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.370308,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.12001,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 11.8601,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 7.59011e-06,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.194469,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.313671,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.158331,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.66647,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.222415,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.25813,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.43393e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00815689,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.058985,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0603252,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0589865,
'Execution Unit/Register Files/Runtime Dynamic': 0.0684821,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.124265,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.326404,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.66942,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00249135,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00249135,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00223246,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000898408,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000866577,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00808174,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0216537,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0579922,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.6888,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.229628,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.196968,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.08634,
'Instruction Fetch Unit/Runtime Dynamic': 0.514323,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0297368,
'L2/Runtime Dynamic': 0.00581389,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.07721,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.886587,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0595312,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0595312,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.35833,
'Load Store Unit/Runtime Dynamic': 1.23971,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.146794,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.293588,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0520976,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0523539,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.229356,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0382077,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.474959,
'Memory Management Unit/Runtime Dynamic': 0.0905616,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.797,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 4.00727e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00877394,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0992643,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.108042,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.62787,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.250302,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.403728,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.203789,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.857819,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.286273,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.38316,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0104988,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0759195,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0776451,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0759195,
'Execution Unit/Register Files/Runtime Dynamic': 0.0881439,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.159941,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.480583,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.03461,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0020578,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0020578,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00183023,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000729236,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00111538,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00706122,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0183763,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0746422,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.74789,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.248026,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.253519,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.19682,
'Instruction Fetch Unit/Runtime Dynamic': 0.601624,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0564862,
'L2/Runtime Dynamic': 0.0157907,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74708,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.22701,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0812033,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0812033,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.13054,
'Load Store Unit/Runtime Dynamic': 1.70868,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.200234,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.400467,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0710635,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.071909,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.295206,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0406684,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.57339,
'Memory Management Unit/Runtime Dynamic': 0.112577,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.9299,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.011293,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.130247,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.14154,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.61483,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.242453,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.393122,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.30863,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337984,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.545156,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.275176,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.15832,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.185925,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.49299,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.247228,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0141766,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.193313,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.104844,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.440541,
'Execution Unit/Register Files/Runtime Dynamic': 0.119021,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.467883,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.913945,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.98978,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000795581,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000795581,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0006916,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000266991,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.0015061,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00378886,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00767621,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.10079,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.41109,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.248962,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.342327,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.94075,
'Instruction Fetch Unit/Runtime Dynamic': 0.703544,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0657792,
'L2/Runtime Dynamic': 0.00413921,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.98015,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.839273,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0563913,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0563912,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.24645,
'Load Store Unit/Runtime Dynamic': 1.17377,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.139051,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.278102,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0493498,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0503302,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.398618,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0408354,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.639501,
'Memory Management Unit/Runtime Dynamic': 0.0911656,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.9749,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.650344,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0231635,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.156297,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.829804,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.7922,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.5840218280895675,
'Runtime Dynamic': 0.5840218280895675,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.202737,
'Runtime Dynamic': 0.119102,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 90.9316,
'Peak Power': 124.044,
'Runtime Dynamic': 26.0141,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 90.7289,
'Total Cores/Runtime Dynamic': 25.895,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.202737,
'Total L3s/Runtime Dynamic': 0.119102,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.02954
| 124
| 0.681876
| 8,086
| 68,577
| 5.777022
| 0.067648
| 0.123649
| 0.113031
| 0.093507
| 0.939631
| 0.931604
| 0.918866
| 0.886563
| 0.86188
| 0.842699
| 0
| 0.131275
| 0.224434
| 68,577
| 914
| 125
| 75.02954
| 0.74702
| 0
| 0
| 0.642232
| 0
| 0
| 0.657718
| 0.04812
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b6ef579225df1da708a2241bd43ced499f72940
| 28,805
|
py
|
Python
|
project/project03/myanalysis/Myanalysis.py
|
creamcheesesteak/Project_Guardians
|
bfd499f9422964b35acaada1c2e4872835c06c79
|
[
"Apache-2.0"
] | null | null | null |
project/project03/myanalysis/Myanalysis.py
|
creamcheesesteak/Project_Guardians
|
bfd499f9422964b35acaada1c2e4872835c06c79
|
[
"Apache-2.0"
] | null | null | null |
project/project03/myanalysis/Myanalysis.py
|
creamcheesesteak/Project_Guardians
|
bfd499f9422964b35acaada1c2e4872835c06c79
|
[
"Apache-2.0"
] | 2
|
2021-09-10T11:19:25.000Z
|
2021-09-23T23:58:33.000Z
|
import pandas as pd
import sqlite3
class Co2:
# ind_name -> 산업명
def ind_name(self,ind):
con = sqlite3.connect('./sorting.db')
df = pd.read_sql_query('select * from sorting',con)
df2 = df[['sort','industry']]
df3 = df2[df2['industry'] == ind]
result = df3['sort'].tolist()
return result
# graph -> 그래프용 데이터 추출
def graph(self,ind,sido):
#df = pd.read_excel('../../data/calc2_result_graph.xlsx',engine='openpyxl', index_col=0);
con = sqlite3.connect('./sorting.db')
df = pd.read_sql_query('select * from calc2_result_graph',con)
df = df.set_index('time')
graph_col = ind + sido
for i in df:
if i == graph_col:
df = df[graph_col]
df2 = df.transpose()
result = df2[['year_sum','tanso','tree']].tolist()
return result
########################################################
#---------------------BASE--------------------------------#
# 감축량 이하값 기준으로 기본 sort (1순위 감축량 / 2순위 절감액)#
########################################################
def sol1_alt(self,ind,user_co2):
# django 돌릴때 경로
con = sqlite3.connect('./sorting.db')
# 테스트로 돌릴때 경로
# con = sqlite3.connect('../../sorting.db')
df = pd.read_sql_query('select * from top_8_final', con)
user_co2 = float(user_co2)
# 제조업
if ind == 'manufacture':
df_manu = df[df['sort'] == '제조업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_manu['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_manu_except = df_manu.tail(5);
df_manu_except2 = df_manu_except['deco2'].tolist();
for i in df_manu_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_manu[df_manu['deco2']==i]);
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist();
result_data2 = df3['act'].tolist();
result_data3 = df3['money'].tolist();
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 건설업
elif ind == 'building':
df_bulid = df[df['sort'] == '건설업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_bulid['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_bulid_except = df_bulid.tail(5);
df_bulid_except2 = df_bulid_except['deco2'].tolist();
for i in df_bulid_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_bulid[df_bulid['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 도매 및 소매업
elif ind == 'retail':
df_retail = df[df['sort'] == '도매 및 소매업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_retail['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_retail_except = df_retail.tail(5);
df_retail_except2 = df_retail_except['deco2'].tolist();
for i in df_retail_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_retail[df_retail['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 광업
elif ind == 'mining':
df_mining = df[df['sort'] == '광업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_mining['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_mining_except = df_mining.tail(5);
df_mining_except2 = df_mining_except['deco2'].tolist();
for i in df_mining_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_mining[df_mining['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 운수업
elif ind == 'transportation':
df_transport = df[df['sort'] == '운수업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_transport['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_transport_except = df_transport.tail(5);
df_transport_except2 = df_transport_except['deco2'].tolist();
for i in df_transport_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_transport[df_transport['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 폐기물 및 재생사업
elif ind == 'recycle':
df_recycle = df[df['sort'] == '폐기물 및 재생사업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_recycle['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_recycle_except = df_recycle.tail(5);
df_recycle_except2 = df_recycle_except['deco2'].tolist();
for i in df_recycle_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_recycle[df_recycle['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 발전에너지, 수도사업
elif ind == 'energy':
df_energy = df[df['sort'] == '발전에너지, 수도사업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_energy['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_energy_except = df_energy.tail(5);
df_energy_except2 = df_energy_except['deco2'].tolist();
for i in df_energy_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_energy[df_energy['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 농림어업
elif ind == 'primary':
df_primary = df[df['sort'] == '농림어업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_primary['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_primary_except = df_primary.tail(5);
df_primary_except2 = df_primary_except['deco2'].tolist();
for i in df_primary_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_primary[df_primary['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
return result
########################################################
# 감축량 이하값 기준으로 sort (에너지 절감량(연료) 기준)#
########################################################
def sol1_alt_fuel(self,ind,user_co2,sort_base):
# django 돌릴때 경로
con = sqlite3.connect('./sorting.db')
# 테스트로 돌릴때 경로
# con = sqlite3.connect('../../sorting.db')
df = pd.read_sql_query('select * from top_8_final', con)
for i in range(15,20):
df_co2 = df[df['no'] == sort_base[i]].index
df = df.drop(df_co2)
user_co2 = float(user_co2)
# 제조업
if ind == 'manufacture':
df_manu = df[df['sort'] == '제조업']
df_manu = df_manu.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_manu['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_manu_except = df_manu.tail(5);
df_manu_except2 = df_manu_except['deco2'].tolist();
for i in df_manu_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_manu[df_manu['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 건설업
elif ind == 'building':
df_bulid = df[df['sort'] == '건설업']
df_bulid = df_bulid.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_bulid['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_bulid_except = df_bulid.tail(5);
df_bulid_except2 = df_bulid_except['deco2'].tolist();
for i in df_bulid_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_bulid[df_bulid['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 도매 및 소매업
elif ind == 'retail':
df_retail = df[df['sort'] == '도매 및 소매업']
df_retail = df_retail.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_retail['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_retail_except = df_retail.tail(5);
df_retail_except2 = df_retail_except['deco2'].tolist();
for i in df_retail_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_retail[df_retail['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 광업
elif ind == 'mining':
df_mining = df[df['sort'] == '광업']
df_mining = df_mining.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_mining['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_mining_except = df_mining.tail(5);
df_mining_except2 = df_mining_except['deco2'].tolist();
for i in df_mining_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_mining[df_mining['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 운수업
elif ind == 'transportation':
df_transport = df[df['sort'] == '운수업']
df_transport = df_transport.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_transport['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_transport_except = df_transport.tail(5);
df_transport_except2 = df_transport_except['deco2'].tolist();
for i in df_transport_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_transport[df_transport['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 폐기물 및 재생사업
elif ind == 'recycle':
df_recycle = df[df['sort'] == '폐기물 및 재생사업']
df_recycle = df_recycle.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_recycle['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_recycle_except = df_recycle.tail(5);
df_recycle_except2 = df_recycle_except['deco2'].tolist();
for i in df_recycle_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_recycle[df_recycle['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 발전에너지, 수도사업
elif ind == 'energy':
df_energy = df[df['sort'] == '발전에너지, 수도사업']
df_energy = df_energy.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_energy['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_energy_except = df_energy.tail(5);
df_energy_except2 = df_energy_except['deco2'].tolist();
for i in df_energy_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_energy[df_energy['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 농림어업
elif ind == 'primary':
df_primary = df[df['sort'] == '농림어업']
df_primary = df_primary.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_primary['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_primary_except = df_primary.tail(5);
df_primary_except2 = df_primary_except['deco2'].tolist();
for i in df_primary_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_primary[df_primary['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
return result
########################################################
# 감축량 이하값 기준으로 sort (에너지 절감량(전력) 기준)#
########################################################
def sol1_alt_elec(self, ind, user_co2, sort_base):
# django 돌릴때 경로
con = sqlite3.connect('./sorting.db')
# 테스트로 돌릴때 경로
# con = sqlite3.connect('../../sorting.db')
df = pd.read_sql_query('select * from top_8_final', con)
for i in range(15,20):
df_co2 = df[df['no'] == sort_base[i]].index
df = df.drop(df_co2)
user_co2 = float(user_co2)
# 제조업
if ind == 'manufacture':
df_manu = df[df['sort'] == '제조업']
df_manu = df_manu.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_manu['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_manu_except = df_manu.tail(5);
df_manu_except2 = df_manu_except['deco2'].tolist();
for i in df_manu_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_manu[df_manu['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 건설업
elif ind == 'building':
df_bulid = df[df['sort'] == '건설업']
df_bulid = df_bulid.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_bulid['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_bulid_except = df_bulid.tail(5);
df_bulid_except2 = df_bulid_except['deco2'].tolist();
for i in df_bulid_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_bulid[df_bulid['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 도매 및 소매업
elif ind == 'retail':
df_retail = df[df['sort'] == '도매 및 소매업']
df_retail = df_retail.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_retail['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_retail_except = df_retail.tail(5);
df_retail_except2 = df_retail_except['deco2'].tolist();
for i in df_retail_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_retail[df_retail['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 광업
elif ind == 'mining':
df_mining = df[df['sort'] == '광업']
df_mining = df_mining.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_mining['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_mining_except = df_mining.tail(5);
df_mining_except2 = df_mining_except['deco2'].tolist();
for i in df_mining_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_mining[df_mining['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 운수업
elif ind == 'transportation':
df_transport = df[df['sort'] == '운수업']
df_transport = df_transport.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_transport['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_transport_except = df_transport.tail(5);
df_transport_except2 = df_transport_except['deco2'].tolist();
for i in df_transport_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_transport[df_transport['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 폐기물 및 재생사업
elif ind == 'recycle':
df_recycle = df[df['sort'] == '폐기물 및 재생사업']
df_recycle = df_recycle.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_recycle['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_recycle_except = df_recycle.tail(5);
df_recycle_except2 = df_recycle_except['deco2'].tolist();
for i in df_recycle_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_recycle[df_recycle['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 발전에너지, 수도사업
elif ind == 'energy':
df_energy = df[df['sort'] == '발전에너지, 수도사업']
df_energy = df_energy.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_energy['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_energy_except = df_energy.tail(5);
df_energy_except2 = df_energy_except['deco2'].tolist();
for i in df_energy_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_energy[df_energy['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
# 농림어업
elif ind == 'primary':
df_primary = df[df['sort'] == '농림어업']
df_primary = df_primary.sort_values(by=['에너지 절감량(전력)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_primary['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_primary_except = df_primary.tail(5);
df_primary_except2 = df_primary_except['deco2'].tolist();
for i in df_primary_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_primary[df_primary['deco2'] == i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
return result
if __name__ == '__main__':
    # Ad-hoc smoke checks; the commented-out calls are kept for reference.
    # print(Co2().c1('manufacture'))
    # print(Co2().c3('manufacture'))
    # print(Co2().graph('manufacture','강원도'))
    # NOTE(review): sol1_alt_elec is defined as (self, ind, user_co2, sort_base),
    # so this 2-argument call looks like it would raise TypeError — confirm the
    # intended sort_base value (presumably the same applies to sol1_alt and
    # sol1_alt_fuel if they share the signature).
    print(Co2().sol1_alt('primary',40))
    print(Co2().sol1_alt_fuel('primary',40))
    print(Co2().sol1_alt_elec('primary',40))
| 31.793598
| 97
| 0.46728
| 3,210
| 28,805
| 3.993769
| 0.041745
| 0.030889
| 0.046334
| 0.030577
| 0.95507
| 0.95507
| 0.951326
| 0.951326
| 0.951326
| 0.951326
| 0
| 0.047633
| 0.392154
| 28,805
| 905
| 98
| 31.828729
| 0.684562
| 0.026384
| 0
| 0.931034
| 0
| 0
| 0.057307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007837
| false
| 0
| 0.003135
| 0
| 0.020376
| 0.004702
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2989608e0de3f3248d37d055d361d40d37b95f86
| 265
|
py
|
Python
|
rastervision/v2/rv/backend/__init__.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2019-11-07T10:02:23.000Z
|
2019-11-07T10:02:23.000Z
|
rastervision/v2/rv/backend/__init__.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/v2/rv/backend/__init__.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
from rastervision.v2.rv.backend.backend import *
from rastervision.v2.rv.backend.backend_config import *
from rastervision.v2.rv.backend.pytorch_chip_classification import *
from rastervision.v2.rv.backend.pytorch_chip_classification_config import *
| 44.166667
| 75
| 0.845283
| 36
| 265
| 6.055556
| 0.333333
| 0.293578
| 0.330275
| 0.366972
| 0.87156
| 0.87156
| 0.53211
| 0.53211
| 0.53211
| 0
| 0
| 0.020325
| 0.071698
| 265
| 6
| 75
| 44.166667
| 0.865854
| 0.045283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
298ef43c25383bc8a19fd643f4613084e7e2a880
| 4,274
|
py
|
Python
|
users-backend/users/internal_api/tests/queries/test_users_by_ids.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 2
|
2017-07-18T21:51:25.000Z
|
2017-12-23T11:08:39.000Z
|
users-backend/users/internal_api/tests/queries/test_users_by_ids.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 23
|
2017-07-18T20:22:38.000Z
|
2018-01-05T05:45:15.000Z
|
users-backend/users/internal_api/tests/queries/test_users_by_ids.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 2
|
2017-07-18T21:27:33.000Z
|
2017-07-18T22:07:03.000Z
|
from ward import test
from users.tests.api import internalapi_graphql_client
from users.tests.factories import user_factory
from users.tests.session import db
@test("get users by ids")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    user_factory=user_factory,
):
    """usersByIds returns exactly the users whose ids were requested."""
    # authenticate as a service that is allowed to call the internal API
    internalapi_graphql_client.force_service_login(issuer="pycon-backend")
    user_1 = await user_factory(
        email="testuser@user.it", fullname="Name", is_staff=False
    )
    user_2 = await user_factory(
        email="testuser2@user.it", fullname="Another", is_staff=False
    )
    # a third user that is NOT requested, to prove the query filters by id
    await user_factory(email="testuser3@user.it", fullname="Name", is_staff=False)
    query = """query($ids: [ID!]!) {
        usersByIds(ids: $ids) {
            id
        }
    }"""
    response = await internalapi_graphql_client.query(
        query, variables={"ids": [user_1.id, user_2.id]}
    )
    assert not response.errors
    assert len(response.data["usersByIds"]) == 2
    assert {"id": str(user_1.id)} in response.data["usersByIds"]
    assert {"id": str(user_2.id)} in response.data["usersByIds"]
@test("get users by ids with no ids passed returns nothing")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    user_factory=user_factory,
):
    """An empty ids list yields an empty usersByIds result."""
    internalapi_graphql_client.force_service_login(issuer="pycon-backend")
    # users exist in the database, but none of them is requested
    await user_factory(email="testuser@user.it", fullname="Name", is_staff=False)
    await user_factory(email="testuser2@user.it", fullname="Another", is_staff=False)
    await user_factory(email="testuser3@user.it", fullname="Name", is_staff=False)
    query = """query($ids: [ID!]!) {
        usersByIds(ids: $ids) {
            id
        }
    }"""
    response = await internalapi_graphql_client.query(query, variables={"ids": []})
    assert not response.errors
    assert len(response.data["usersByIds"]) == 0
@test("user is not returned if the id does not exist")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    user_factory=user_factory,
):
    """Requesting an id that matches no user returns an empty list."""
    internalapi_graphql_client.force_service_login(issuer="pycon-backend")
    await user_factory(email="testuser@user.it", fullname="Name", is_staff=False)
    await user_factory(email="testuser2@user.it", fullname="Another", is_staff=False)
    await user_factory(email="testuser3@user.it", fullname="Name", is_staff=False)
    query = """query($ids: [ID!]!) {
        usersByIds(ids: $ids) {
            id
        }
    }"""
    # id 50 presumably does not match any factory-created user — verify fixture ids
    response = await internalapi_graphql_client.query(query, variables={"ids": [50]})
    assert not response.errors
    assert len(response.data["usersByIds"]) == 0
@test("cannot call without token")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    user_factory=user_factory,
):
    """Without a service login the query is rejected with 'Forbidden'."""
    # note: no force_service_login here — the request is unauthenticated
    user_1 = await user_factory(
        email="testuser@user.it", fullname="Name", is_staff=False
    )
    user_2 = await user_factory(
        email="testuser2@user.it", fullname="Another", is_staff=False
    )
    await user_factory(email="testuser3@user.it", fullname="Name", is_staff=False)
    query = """query($ids: [ID!]!) {
        usersByIds(ids: $ids) {
            id
        }
    }"""
    response = await internalapi_graphql_client.query(
        query, variables={"ids": [user_1.id, user_2.id]}
    )
    assert response.errors[0]["message"] == "Forbidden"
    assert not response.data
@test("cannot call token of not allowed service")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    user_factory=user_factory,
):
    """A token issued for a non-whitelisted service is rejected with 'Forbidden'."""
    # authenticated, but as a service that is not allowed to use this API
    internalapi_graphql_client.force_service_login(issuer="not-allowed-service")
    user_1 = await user_factory(
        email="testuser@user.it", fullname="Name", is_staff=False
    )
    user_2 = await user_factory(
        email="testuser2@user.it", fullname="Another", is_staff=False
    )
    await user_factory(email="testuser3@user.it", fullname="Name", is_staff=False)
    query = """query($ids: [ID!]!) {
        usersByIds(ids: $ids) {
            id
        }
    }"""
    response = await internalapi_graphql_client.query(
        query, variables={"ids": [user_1.id, user_2.id]}
    )
    assert response.errors[0]["message"] == "Forbidden"
    assert not response.data
| 30.748201
| 85
| 0.671268
| 538
| 4,274
| 5.13197
| 0.133829
| 0.103586
| 0.17385
| 0.114089
| 0.88736
| 0.856212
| 0.856212
| 0.856212
| 0.856212
| 0.838827
| 0
| 0.009001
| 0.194197
| 4,274
| 138
| 86
| 30.971014
| 0.792683
| 0
| 0
| 0.743363
| 0
| 0
| 0.252925
| 0
| 0
| 0
| 0
| 0
| 0.106195
| 1
| 0
| false
| 0.00885
| 0.035398
| 0
| 0.035398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29ab62aa18aa121b5e2e3f71a962d888572b4544
| 1,734
|
py
|
Python
|
migrations/versions/ee90ed45fc44_.py
|
MohmedH/OpulentGroupDashboard
|
a071196da0146125b627894178125c1739dfbd7d
|
[
"MIT"
] | 1
|
2020-12-10T21:11:44.000Z
|
2020-12-10T21:11:44.000Z
|
migrations/versions/ee90ed45fc44_.py
|
MohmedH/OpulentGroupDashboard
|
a071196da0146125b627894178125c1739dfbd7d
|
[
"MIT"
] | null | null | null |
migrations/versions/ee90ed45fc44_.py
|
MohmedH/OpulentGroupDashboard
|
a071196da0146125b627894178125c1739dfbd7d
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: ee90ed45fc44
Revises: 0d7c1a126270
Create Date: 2020-07-21 00:57:02.324442
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ee90ed45fc44'       # this migration's id
down_revision = '0d7c1a126270'  # parent migration in the chain
branch_labels = None            # no named branch
depends_on = None               # no cross-branch dependency
def upgrade():
    """Make the PortfolioMaster numeric columns NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    # same five alter_column calls as before, issued in the same order
    for column_name in ('gains', 'losses', 'total', 'weight', 'withdrawls'):
        op.alter_column('PortfolioMaster', column_name,
                        existing_type=sa.REAL(),
                        nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the PortfolioMaster numeric columns to nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    # mirror of upgrade(): same columns, reverse order, nullable again
    for column_name in ('withdrawls', 'weight', 'total', 'losses', 'gains'):
        op.alter_column('PortfolioMaster', column_name,
                        existing_type=sa.REAL(),
                        nullable=True)
    # ### end Alembic commands ###
| 30.421053
| 65
| 0.600346
| 174
| 1,734
| 5.850575
| 0.316092
| 0.068762
| 0.127701
| 0.275049
| 0.720039
| 0.720039
| 0.711198
| 0.711198
| 0.711198
| 0.711198
| 0
| 0.039872
| 0.276817
| 1,734
| 56
| 66
| 30.964286
| 0.77193
| 0.170127
| 0
| 0.789474
| 0
| 0
| 0.169757
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29f6986f6ef48ee0084d1f7cb235b8346602836c
| 23,578
|
py
|
Python
|
test/operations/CorrectTest.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
test/operations/CorrectTest.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
test/operations/CorrectTest.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import unittest
from softwareprocess.operations.correct import Correct
class CorrectTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_validate_parameter_mandatory_information(self):
# tests following mandatory parameter presence
# lat xdyy.y
# long xdyy.y
# altitude xdyy.y
# assumedLat xdyy.y
# assumedLong xdyy.y
expected = "mandatory information is missing"
# happy path
# all parameter present
input_dict = {'op': 'correct', 'lat': "asdsad", 'long': "adsa",
'assumedLat': 'unknown', 'assumedLong': '2016-01-17',
'altitude': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# missing lat
input_dict = {'op': 'correct', 'lat1': "asdsad", 'long': "adsa",
'assumedLat': 'unknown', 'assumedLong': '2016-01-17',
'altitude': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(expected in result)
# missing long
input_dict = {'op': 'correct', 'lat': "asdsad", 'long1': "adsa",
'assumedLat': 'unknown', 'assumedLong': '2016-01-17',
'altitude': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(expected in result)
# missing altitude
input_dict = {'op': 'correct', 'lat': "asdsad", 'long': "adsa",
'assumedLat': 'unknown', 'assumedLong': '2016-01-17',
'altitude1': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(expected in result)
# missing assumedLat
input_dict = {'op': 'correct', 'lat': "asdsad", 'long': "adsa",
'assumed1Lat': 'unknown', 'assumedLong': '2016-01-17',
'altitude': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(expected in result)
# missing assumedLong
input_dict = {'op': 'correct', 'lat': "asdsad", 'long': "adsa",
'assumedLat': 'unknown', 'assumed1Long': '2016-01-17',
'altitude': '03:15:99'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(expected in result)
def test_validate_parameter_long(self):
# long: mandatory, unvalidated,
# xdyy.y
# x GE 0 nd LT 360
# yy.y GE 0 and LT 60.0
# happy path
# high bound long
input_dict = {'op': 'correct', 'long': "359d59.9", 'lat': "89d59.9",
'assumedLat': '89d59.9', 'assumedLong': '89d59.9',
'altitude': '89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# low bound long
input_dict = {'op': 'correct', 'long': "0d0.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# normal long
input_dict = {'op': 'correct', 'long': '21d12.0', 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# not string
input_dict = {'op': 'correct', 'long': 123, 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Longitude Must be A String Value" in result, result)
# bad format
input_dict = {'op': 'correct', 'long': "-89d001.9", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Longitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'long': "12ddd12.3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Longitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'long': "12d12..3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Longitude Format: xdyy.y" in result, result)
# out of high range
input_dict = {'op': 'correct', 'long': "360d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Longitude Out of Range: 0.0 <= long < 360.0" in result, result)
# out of low range
input_dict = {'op': 'correct', 'long': "-1d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Longitude Out of Range: 0.0 <= long < 360.0" in result, result)
# out of arc minute range
input_dict = {'op': 'correct', 'long': "30d70.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Longitude Minute Out of Range: 0 <= long < 60.0" in result, result)
def test_validate_parameter_assumedLong(self):
# assumedLong: mandatory, unvalidated,
# xdyy.y
# x GE 0 nd LT 360
# yy.y GE 0 and LT 60.0
# happy path
# high bound assumedLong
input_dict = {'op': 'correct', 'assumedLong': "359d59.9", 'lat': "89d59.9",
'assumedLat': '89d59.9', 'long': '89d59.9',
'altitude': '89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# low bound assumedLong
input_dict = {'op': 'correct', 'assumedLong': "0d0.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# normal assumedLong
input_dict = {'op': 'correct', 'assumedLong': "21d12.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# not string
input_dict = {'op': 'correct', 'assumedLong': 123, 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Longitude Must be A String Value" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLong': "-89d001.9", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Longitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLong': "12ddd12.3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Longitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLong': "12d12..3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Longitude Format: xdyy.y" in result, result)
# out of high range
input_dict = {'op': 'correct', 'assumedLong': "360d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Longitude Out of Range: 0.0 <= assumedLong < 360.0" in result, result)
# out of low range
input_dict = {'op': 'correct', 'assumedLong': "-1d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Longitude Out of Range: 0.0 <= assumedLong < 360.0" in result, result)
# out of arc minute range
input_dict = {'op': 'correct', 'assumedLong': "30d70.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Longitude Minute Out of Range: 0 <= assumedLong < 60.0" in result, result)
def test_validate_parameter_lat(self):
# lat: mandatory, unvalidated,
# xdyy.y
# x GT -90 and LT 90
# yy.y GT 0 and LT 60.0
# happy path
# high boiund lat
input_dict = {'op': 'correct', 'lat': "89d59.9", 'long': "89d59.9",
'assumedLat': '89d59.9', 'assumedLong': '89d59.9',
'altitude': '89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# low bound lat
input_dict = {'op': 'correct', 'lat': "-89d59.9", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# normal lat
input_dict = {'op': 'correct', 'lat': "0d0.0", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# not string
input_dict = {'op': 'correct', 'lat': 123, 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Latitude Must be A String Value" in result, result)
# bad format
input_dict = {'op': 'correct', 'lat': "-89d001.9", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Latitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'lat': "12ddd12.3", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Latitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'lat': "12d12..3", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Latitude Format: xdyy.y" in result, result)
# out of high range
input_dict = {'op': 'correct', 'lat': "90d00.0", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Latitude Out of Range: -90.0 < lat < 90.0" in result, result)
# out of low range
input_dict = {'op': 'correct', 'lat': "-90d00.0", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Latitude Out of Range: -90.0 < lat < 90.0" in result, result)
# out of arc minute range
input_dict = {'op': 'correct', 'lat': "30d70.0", 'long': "-89d59.9",
'assumedLat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Latitude Minute Out of Range: 0 <= lat < 60.0" in result, result)
def test_validate_parameter_assumedLat(self):
# assumedLat: mandatory, unvalidated,
# xdyy.y
# x GT -90 and LT 90
# yy.y GT 0 and LT 60.0
# happy path
# high bound assumedLat
input_dict = {'op': 'correct', 'assumedLat': "89d59.9", 'long': "89d59.9",
'lat': '89d59.9', 'assumedLong': '89d59.9',
'altitude': '89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# low bound assumedLat
input_dict = {'op': 'correct', 'assumedLat': "-89d59.9", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# normal assumedLat
input_dict = {'op': 'correct', 'assumedLat': "0d0.0", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# not string
input_dict = {'op': 'correct', 'assumedLat': 123, 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Latitude Must be A String Value" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLat': "-89d001.9", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Latitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLat': "12ddd12.3", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Latitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'assumedLat': "12d12..3", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Assumed Latitude Format: xdyy.y" in result, result)
# out of high range
input_dict = {'op': 'correct', 'assumedLat': "90d00.0", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Latitude Out of Range: -90.0 < assumedLat < 90.0" in result, result)
# out of low range
input_dict = {'op': 'correct', 'assumedLat': "-90d00.0", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Latitude Out of Range: -90.0 < assumedLat < 90.0" in result, result)
# out of arc minute range
input_dict = {'op': 'correct', 'assumedLat': "30d70.0", 'long': "-89d59.9",
'lat': '-89d59.9', 'assumedLong': '-89d59.9',
'altitude': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Assumed Latitude Minute Out of Range: 0 <= assumedLat < 60.0" in result, result)
def test_validate_parameter_altitude(self):
# altitude: mandatory, unvalidated,
# xdyy.y
# x GT 0 and LT 90
# yy.y GE 0 and LT 60.0
# happy path
# high bound assumedLong
input_dict = {'op': 'correct', 'altitude': "89d59.9", 'lat': "89d59.9",
'assumedLat': '89d59.9', 'long': '89d59.9',
'assumedLong': '89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# low bound assumedLong
input_dict = {'op': 'correct', 'altitude': "0d0.1", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# normal assumedLong
input_dict = {'op': 'correct', 'altitude': "21d12.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue(result)
# sad path
# not string
input_dict = {'op': 'correct', 'altitude': 123, 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Altitude Must be A String Value" in result, result)
# bad format
input_dict = {'op': 'correct', 'altitude': "-89d001.9", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Altitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'altitude': "12ddd12.3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Altitude Format: xdyy.y" in result, result)
# bad format
input_dict = {'op': 'correct', 'altitude': "12d12..3", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Incorrect Altitude Format: xdyy.y" in result, result)
# out of high range
input_dict = {'op': 'correct', 'altitude': "90d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Altitude Out of Range: 0.0 < altitude < 90.0" in result, result)
# out of low range
input_dict = {'op': 'correct', 'altitude': "0d00.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Altitude value cannot be less than 0d0.1" in result, result)
# out of arc minute range
input_dict = {'op': 'correct', 'altitude': "0d70.0", 'lat': "-89d59.9",
'assumedLat': '-89d59.9', 'long': '-89d59.9',
'assumedLong': '-89d59.9'}
result = Correct.validate_parameter(input_dict)
self.assertTrue("Altitude Minute Out of Range: 0 <= altitude < 60.0" in result, result)
def test_validate_parameter_no_correctedAzimuth(self):
    """validate_parameter must reject an input dict containing 'correctedAzimuth'.

    'correctedAzimuth' is an output-only key, so its presence in the
    input dictionary is a forbidden-parameter error.
    """
    # happy path: no correctedAzimuth key -> validation succeeds
    input_dict = {'op': 'correct', 'altitude': "89d59.9", 'lat': "89d59.9",
                  'assumedLat': '89d59.9', 'long': '89d59.9',
                  'assumedLong': '89d59.9'}
    result = Correct.validate_parameter(input_dict)
    self.assertTrue(result)
    # sad path: correctedAzimuth present -> forbidden-parameter error
    input_dict = {'op': 'correct', 'altitude': "89d59.9", 'lat': "89d59.9",
                  'assumedLat': '89d59.9', 'long': '89d59.9',
                  'assumedLong': '89d59.9', 'correctedAzimuth': "asda"}
    result = Correct.validate_parameter(input_dict)
    # assertIn reports both operands on failure, unlike assertTrue(x in y)
    self.assertIn(
        "Input Dictionary Contains Forbidden Parameter: correctAzimuth/correctedDistance",
        result, result)
def test_validate_parameter_no_correctedDistance(self):
    """validate_parameter must reject an input dict containing 'correctedDistance'.

    'correctedDistance' is an output-only key, so its presence in the
    input dictionary is a forbidden-parameter error.
    """
    # happy path: no correctedDistance key -> validation succeeds
    input_dict = {'op': 'correct', 'altitude': "89d59.9", 'lat': "89d59.9",
                  'assumedLat': '89d59.9', 'long': '89d59.9',
                  'assumedLong': '89d59.9'}
    result = Correct.validate_parameter(input_dict)
    self.assertTrue(result)
    # sad path: correctedDistance present -> forbidden-parameter error
    input_dict = {'op': 'correct', 'altitude': "89d59.9", 'lat': "89d59.9",
                  'assumedLat': '89d59.9', 'long': '89d59.9',
                  'assumedLong': '89d59.9', 'correctedDistance': "asda"}
    result = Correct.validate_parameter(input_dict)
    # assertIn reports both operands on failure, unlike assertTrue(x in y)
    self.assertIn(
        "Input Dictionary Contains Forbidden Parameter: correctAzimuth/correctedDistance",
        result, result)
| 46.874751
| 124
| 0.527823
| 2,450
| 23,578
| 4.993061
| 0.050204
| 0.110357
| 0.053952
| 0.088286
| 0.935421
| 0.919562
| 0.894057
| 0.887109
| 0.876155
| 0.859397
| 0
| 0.096434
| 0.31962
| 23,578
| 502
| 125
| 46.968127
| 0.666126
| 0.077106
| 0
| 0.735016
| 0
| 0
| 0.307948
| 0.002956
| 0
| 0
| 0
| 0
| 0.189274
| 1
| 0.031546
| false
| 0.006309
| 0.009464
| 0
| 0.044164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b018545756187250d7c858b4eead048b820b534
| 47
|
py
|
Python
|
contests_yukicoder/283/283_d.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
contests_yukicoder/283/283_d.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | 1
|
2021-01-02T06:36:51.000Z
|
2021-01-02T06:36:51.000Z
|
contests_yukicoder/283/283_d.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
5 64000000000000
00100
00100
11111
00100
00100
| 6.714286
| 16
| 0.851064
| 7
| 47
| 5.714286
| 0.571429
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148936
| 47
| 6
| 17
| 7.833333
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9cded9cabf873a8eab72e620d924d51e8d3a8f0
| 54,406
|
py
|
Python
|
django_airavata/wagtailapps/base/migrations/0007_auto_20180415_0045.py
|
sairohithA007/airavata-django-portal
|
fe18d65802f02c9faf805c8edfdee3341c66e93a
|
[
"Apache-2.0"
] | 19
|
2017-09-04T00:36:52.000Z
|
2022-01-24T08:44:22.000Z
|
django_airavata/wagtailapps/base/migrations/0007_auto_20180415_0045.py
|
sairohithA007/airavata-django-portal
|
fe18d65802f02c9faf805c8edfdee3341c66e93a
|
[
"Apache-2.0"
] | 35
|
2017-10-17T02:36:01.000Z
|
2022-03-09T04:46:57.000Z
|
django_airavata/wagtailapps/base/migrations/0007_auto_20180415_0045.py
|
sairohithA007/airavata-django-portal
|
fe18d65802f02c9faf805c8edfdee3341c66e93a
|
[
"Apache-2.0"
] | 38
|
2017-09-15T14:17:42.000Z
|
2021-12-15T17:11:31.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-15 00:45
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('django_airavata_wagtail_base', '0006_auto_20180415_0040'),
]
operations = [
migrations.AlterField(
model_name='cybergatewayhomepage',
name='footer',
field=wagtail.core.fields.StreamField((('paragraph_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)), ('body', wagtail.core.blocks.RichTextBlock())))), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('width', wagtail.core.blocks.CharBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False)), ('redirect_url', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a redirect link on clicking the image', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('embed_block', wagtail.core.blocks.StructBlock((('embed', wagtail.embeds.blocks.EmbedBlock()), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_jumbotron', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.RichTextBlock()), ('button_text', wagtail.core.blocks.TextBlock(required=False)), ('button_link', 
wagtail.core.blocks.TextBlock(required=False)), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_alert', wagtail.core.blocks.StructBlock((('alert_text', wagtail.core.blocks.TextBlock()), ('alert_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('alert-primary', 'DEFAULT'), ('alert-secondary', 'GREY'), ('alert-success', 'GREEN'), ('alert-danger', 'RED'), ('alert-warning', 'ORANGE'), ('alert-dark', 'DARK'), ('alert-light', 'LIGHT')], help_text='select a background color', required=False)), ('is_link', wagtail.core.blocks.BooleanBlock(required=False)), ('alert_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_button', wagtail.core.blocks.StructBlock((('button_text', wagtail.core.blocks.TextBlock()), ('button_link', wagtail.core.blocks.TextBlock()), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', 
required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_card', wagtail.core.blocks.StructBlock((('card_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='18 works best for card', required=False)), ('is_card_img', wagtail.core.blocks.BooleanBlock(required=False)), ('card_img', wagtail.images.blocks.ImageChooserBlock(required=False)), ('card_img_width', wagtail.core.blocks.IntegerBlock(help_text='provide an image width', required=False)), ('card_img_height', wagtail.core.blocks.IntegerBlock(help_text='provide an image height', required=False)), ('card_title', wagtail.core.blocks.TextBlock(blank=True, null=True, required=False)), ('card_text', wagtail.core.blocks.RichTextBlock(blank=True, null=True, required=False)), ('card_bg_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('card_text_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('text-primary', 'DEFAULT'), ('text-secondary', 'GREY'), ('text-success', 'GREEN'), ('text-danger', 'RED'), ('text-warning', 'ORANGE'), ('text-dark', 'DARK'), ('text-light', 'LIGHT')], help_text='select a text color', required=False)), ('btn_text', wagtail.core.blocks.TextBlock(required=False)), ('btn_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('btn_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class 
names separated by space and styling the class in css', required=False))))), ('bootstrap_carousel', wagtail.core.blocks.StructBlock((('c_image1', wagtail.images.blocks.ImageChooserBlock(required=True)), ('c_image1_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 1', required=False)), ('c_image1_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 1', required=False)), ('c_image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image2_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 2', required=False)), ('c_image2_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 2', required=False)), ('c_image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image3_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 3', required=False)), ('c_image3_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 3', required=False)), ('c_image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image4_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 4', required=False)), ('c_image4_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 4', required=False)), ('c_image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image5_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 5', required=False)), ('c_image5_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 5', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_well', wagtail.core.blocks.StructBlock((('message', wagtail.core.blocks.RichTextBlock(help_text='Enter some message inside well')), ('well_bg_color', 
wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('horizontal_rule', wagtail.core.blocks.StructBlock((('thickness', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter a thickness integer value. Eg(10)', required=False)), ('bg_color', wagtail.core.blocks.TextBlock(help_text='Enter a hexcode color for the rule Eg(#000000)', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_media_object', wagtail.core.blocks.StructBlock((('media_img', wagtail.images.blocks.ImageChooserBlock(required=True)), ('media_img_alt', wagtail.core.blocks.TextBlock(required=True)), ('media_img_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image width as an integer value. 
Eg(50)', required=False)), ('media_img_height', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image height as an integer value Eg(50)', required=False)), ('heading_text', wagtail.core.blocks.TextBlock(blank=True, help_text='enter some heading text for media object', required=False)), ('body_text', wagtail.core.blocks.RichTextBlock(help_text='Enter some message for the media object', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('placeholder_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)),))), ('font_awesome_icon_block', wagtail.core.blocks.StructBlock((('icon_tag', wagtail.core.blocks.TextBlock(blank=False, help_text='Provide a font awesome icon class text', required=True)), ('icon_size', wagtail.core.blocks.IntegerBlock(blank=True, default=2, help_text='Provide a icon size in number type', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)))))), blank=True, null=True, verbose_name='Footer Content Block'),
),
migrations.AlterField(
model_name='footertext',
name='footer',
field=wagtail.core.fields.StreamField((('paragraph_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)), ('body', wagtail.core.blocks.RichTextBlock())))), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('width', wagtail.core.blocks.CharBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False)), ('redirect_url', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a redirect link on clicking the image', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('embed_block', wagtail.core.blocks.StructBlock((('embed', wagtail.embeds.blocks.EmbedBlock()), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_jumbotron', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.RichTextBlock()), ('button_text', wagtail.core.blocks.TextBlock(required=False)), ('button_link', 
wagtail.core.blocks.TextBlock(required=False)), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_alert', wagtail.core.blocks.StructBlock((('alert_text', wagtail.core.blocks.TextBlock()), ('alert_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('alert-primary', 'DEFAULT'), ('alert-secondary', 'GREY'), ('alert-success', 'GREEN'), ('alert-danger', 'RED'), ('alert-warning', 'ORANGE'), ('alert-dark', 'DARK'), ('alert-light', 'LIGHT')], help_text='select a background color', required=False)), ('is_link', wagtail.core.blocks.BooleanBlock(required=False)), ('alert_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_button', wagtail.core.blocks.StructBlock((('button_text', wagtail.core.blocks.TextBlock()), ('button_link', wagtail.core.blocks.TextBlock()), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', 
required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_card', wagtail.core.blocks.StructBlock((('card_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='18 works best for card', required=False)), ('is_card_img', wagtail.core.blocks.BooleanBlock(required=False)), ('card_img', wagtail.images.blocks.ImageChooserBlock(required=False)), ('card_img_width', wagtail.core.blocks.IntegerBlock(help_text='provide an image width', required=False)), ('card_img_height', wagtail.core.blocks.IntegerBlock(help_text='provide an image height', required=False)), ('card_title', wagtail.core.blocks.TextBlock(blank=True, null=True, required=False)), ('card_text', wagtail.core.blocks.RichTextBlock(blank=True, null=True, required=False)), ('card_bg_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('card_text_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('text-primary', 'DEFAULT'), ('text-secondary', 'GREY'), ('text-success', 'GREEN'), ('text-danger', 'RED'), ('text-warning', 'ORANGE'), ('text-dark', 'DARK'), ('text-light', 'LIGHT')], help_text='select a text color', required=False)), ('btn_text', wagtail.core.blocks.TextBlock(required=False)), ('btn_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('btn_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class 
names separated by space and styling the class in css', required=False))))), ('bootstrap_carousel', wagtail.core.blocks.StructBlock((('c_image1', wagtail.images.blocks.ImageChooserBlock(required=True)), ('c_image1_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 1', required=False)), ('c_image1_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 1', required=False)), ('c_image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image2_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 2', required=False)), ('c_image2_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 2', required=False)), ('c_image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image3_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 3', required=False)), ('c_image3_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 3', required=False)), ('c_image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image4_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 4', required=False)), ('c_image4_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 4', required=False)), ('c_image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image5_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 5', required=False)), ('c_image5_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 5', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_well', wagtail.core.blocks.StructBlock((('message', wagtail.core.blocks.RichTextBlock(help_text='Enter some message inside well')), ('well_bg_color', 
wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('horizontal_rule', wagtail.core.blocks.StructBlock((('thickness', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter a thickness integer value. Eg(10)', required=False)), ('bg_color', wagtail.core.blocks.TextBlock(help_text='Enter a hexcode color for the rule Eg(#000000)', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_media_object', wagtail.core.blocks.StructBlock((('media_img', wagtail.images.blocks.ImageChooserBlock(required=True)), ('media_img_alt', wagtail.core.blocks.TextBlock(required=True)), ('media_img_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image width as an integer value. 
Eg(50)', required=False)), ('media_img_height', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image height as an integer value Eg(50)', required=False)), ('heading_text', wagtail.core.blocks.TextBlock(blank=True, help_text='enter some heading text for media object', required=False)), ('body_text', wagtail.core.blocks.RichTextBlock(help_text='Enter some message for the media object', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('placeholder_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)),))), ('font_awesome_icon_block', wagtail.core.blocks.StructBlock((('icon_tag', wagtail.core.blocks.TextBlock(blank=False, help_text='Provide a font awesome icon class text', required=True)), ('icon_size', wagtail.core.blocks.IntegerBlock(blank=True, default=2, help_text='Provide a icon size in number type', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)))))), blank=True, null=True, verbose_name='Footer content block'),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField((('paragraph_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)), ('body', wagtail.core.blocks.RichTextBlock())))), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('width', wagtail.core.blocks.CharBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False)), ('redirect_url', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a redirect link on clicking the image', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('embed_block', wagtail.core.blocks.StructBlock((('embed', wagtail.embeds.blocks.EmbedBlock()), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_jumbotron', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.RichTextBlock()), ('button_text', wagtail.core.blocks.TextBlock(required=False)), ('button_link', 
wagtail.core.blocks.TextBlock(required=False)), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_alert', wagtail.core.blocks.StructBlock((('alert_text', wagtail.core.blocks.TextBlock()), ('alert_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('alert-primary', 'DEFAULT'), ('alert-secondary', 'GREY'), ('alert-success', 'GREEN'), ('alert-danger', 'RED'), ('alert-warning', 'ORANGE'), ('alert-dark', 'DARK'), ('alert-light', 'LIGHT')], help_text='select a background color', required=False)), ('is_link', wagtail.core.blocks.BooleanBlock(required=False)), ('alert_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_button', wagtail.core.blocks.StructBlock((('button_text', wagtail.core.blocks.TextBlock()), ('button_link', wagtail.core.blocks.TextBlock()), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', 
required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_card', wagtail.core.blocks.StructBlock((('card_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='18 works best for card', required=False)), ('is_card_img', wagtail.core.blocks.BooleanBlock(required=False)), ('card_img', wagtail.images.blocks.ImageChooserBlock(required=False)), ('card_img_width', wagtail.core.blocks.IntegerBlock(help_text='provide an image width', required=False)), ('card_img_height', wagtail.core.blocks.IntegerBlock(help_text='provide an image height', required=False)), ('card_title', wagtail.core.blocks.TextBlock(blank=True, null=True, required=False)), ('card_text', wagtail.core.blocks.RichTextBlock(blank=True, null=True, required=False)), ('card_bg_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('card_text_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('text-primary', 'DEFAULT'), ('text-secondary', 'GREY'), ('text-success', 'GREEN'), ('text-danger', 'RED'), ('text-warning', 'ORANGE'), ('text-dark', 'DARK'), ('text-light', 'LIGHT')], help_text='select a text color', required=False)), ('btn_text', wagtail.core.blocks.TextBlock(required=False)), ('btn_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('btn_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class 
names separated by space and styling the class in css', required=False))))), ('bootstrap_carousel', wagtail.core.blocks.StructBlock((('c_image1', wagtail.images.blocks.ImageChooserBlock(required=True)), ('c_image1_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 1', required=False)), ('c_image1_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 1', required=False)), ('c_image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image2_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 2', required=False)), ('c_image2_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 2', required=False)), ('c_image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image3_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 3', required=False)), ('c_image3_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 3', required=False)), ('c_image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image4_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 4', required=False)), ('c_image4_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 4', required=False)), ('c_image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image5_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 5', required=False)), ('c_image5_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 5', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_well', wagtail.core.blocks.StructBlock((('message', wagtail.core.blocks.RichTextBlock(help_text='Enter some message inside well')), ('well_bg_color', 
wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('horizontal_rule', wagtail.core.blocks.StructBlock((('thickness', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter a thickness integer value. Eg(10)', required=False)), ('bg_color', wagtail.core.blocks.TextBlock(help_text='Enter a hexcode color for the rule Eg(#000000)', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_media_object', wagtail.core.blocks.StructBlock((('media_img', wagtail.images.blocks.ImageChooserBlock(required=True)), ('media_img_alt', wagtail.core.blocks.TextBlock(required=True)), ('media_img_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image width as an integer value. 
Eg(50)', required=False)), ('media_img_height', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image height as an integer value Eg(50)', required=False)), ('heading_text', wagtail.core.blocks.TextBlock(blank=True, help_text='enter some heading text for media object', required=False)), ('body_text', wagtail.core.blocks.RichTextBlock(help_text='Enter some message for the media object', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('placeholder_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)),))), ('font_awesome_icon_block', wagtail.core.blocks.StructBlock((('icon_tag', wagtail.core.blocks.TextBlock(blank=False, help_text='Provide a font awesome icon class text', required=True)), ('icon_size', wagtail.core.blocks.IntegerBlock(blank=True, default=2, help_text='Provide a icon size in number type', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)))))), blank=True, null=True, verbose_name='Home content block'),
),
migrations.AlterField(
model_name='rowblankpagerelation',
name='body',
field=wagtail.core.fields.StreamField((('paragraph_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)), ('body', wagtail.core.blocks.RichTextBlock())))), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('width', wagtail.core.blocks.CharBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False)), ('redirect_url', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a redirect link on clicking the image', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('embed_block', wagtail.core.blocks.StructBlock((('embed', wagtail.embeds.blocks.EmbedBlock()), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_jumbotron', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.RichTextBlock()), ('button_text', wagtail.core.blocks.TextBlock(required=False)), ('button_link', 
wagtail.core.blocks.TextBlock(required=False)), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_alert', wagtail.core.blocks.StructBlock((('alert_text', wagtail.core.blocks.TextBlock()), ('alert_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('alert-primary', 'DEFAULT'), ('alert-secondary', 'GREY'), ('alert-success', 'GREEN'), ('alert-danger', 'RED'), ('alert-warning', 'ORANGE'), ('alert-dark', 'DARK'), ('alert-light', 'LIGHT')], help_text='select a background color', required=False)), ('is_link', wagtail.core.blocks.BooleanBlock(required=False)), ('alert_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_button', wagtail.core.blocks.StructBlock((('button_text', wagtail.core.blocks.TextBlock()), ('button_link', wagtail.core.blocks.TextBlock()), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', 
required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_card', wagtail.core.blocks.StructBlock((('card_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='18 works best for card', required=False)), ('is_card_img', wagtail.core.blocks.BooleanBlock(required=False)), ('card_img', wagtail.images.blocks.ImageChooserBlock(required=False)), ('card_img_width', wagtail.core.blocks.IntegerBlock(help_text='provide an image width', required=False)), ('card_img_height', wagtail.core.blocks.IntegerBlock(help_text='provide an image height', required=False)), ('card_title', wagtail.core.blocks.TextBlock(blank=True, null=True, required=False)), ('card_text', wagtail.core.blocks.RichTextBlock(blank=True, null=True, required=False)), ('card_bg_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('card_text_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('text-primary', 'DEFAULT'), ('text-secondary', 'GREY'), ('text-success', 'GREEN'), ('text-danger', 'RED'), ('text-warning', 'ORANGE'), ('text-dark', 'DARK'), ('text-light', 'LIGHT')], help_text='select a text color', required=False)), ('btn_text', wagtail.core.blocks.TextBlock(required=False)), ('btn_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('btn_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class 
names separated by space and styling the class in css', required=False))))), ('bootstrap_carousel', wagtail.core.blocks.StructBlock((('c_image1', wagtail.images.blocks.ImageChooserBlock(required=True)), ('c_image1_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 1', required=False)), ('c_image1_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 1', required=False)), ('c_image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image2_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 2', required=False)), ('c_image2_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 2', required=False)), ('c_image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image3_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 3', required=False)), ('c_image3_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 3', required=False)), ('c_image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image4_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 4', required=False)), ('c_image4_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 4', required=False)), ('c_image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image5_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 5', required=False)), ('c_image5_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 5', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_well', wagtail.core.blocks.StructBlock((('message', wagtail.core.blocks.RichTextBlock(help_text='Enter some message inside well')), ('well_bg_color', 
wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('horizontal_rule', wagtail.core.blocks.StructBlock((('thickness', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter a thickness integer value. Eg(10)', required=False)), ('bg_color', wagtail.core.blocks.TextBlock(help_text='Enter a hexcode color for the rule Eg(#000000)', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_media_object', wagtail.core.blocks.StructBlock((('media_img', wagtail.images.blocks.ImageChooserBlock(required=True)), ('media_img_alt', wagtail.core.blocks.TextBlock(required=True)), ('media_img_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image width as an integer value. 
Eg(50)', required=False)), ('media_img_height', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image height as an integer value Eg(50)', required=False)), ('heading_text', wagtail.core.blocks.TextBlock(blank=True, help_text='enter some heading text for media object', required=False)), ('body_text', wagtail.core.blocks.RichTextBlock(help_text='Enter some message for the media object', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('placeholder_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)),))), ('font_awesome_icon_block', wagtail.core.blocks.StructBlock((('icon_tag', wagtail.core.blocks.TextBlock(blank=False, help_text='Provide a font awesome icon class text', required=True)), ('icon_size', wagtail.core.blocks.IntegerBlock(blank=True, default=2, help_text='Provide a icon size in number type', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)))))), blank=True, null=True, verbose_name='Row Content'),
),
migrations.AlterField(
model_name='rowcybergatewayhomepagerelation',
name='body',
field=wagtail.core.fields.StreamField((('paragraph_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)), ('body', wagtail.core.blocks.RichTextBlock())))), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('width', wagtail.core.blocks.CharBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False)), ('redirect_url', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a redirect link on clicking the image', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('embed_block', wagtail.core.blocks.StructBlock((('embed', wagtail.embeds.blocks.EmbedBlock()), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_jumbotron', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.RichTextBlock()), ('button_text', wagtail.core.blocks.TextBlock(required=False)), ('button_link', 
wagtail.core.blocks.TextBlock(required=False)), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_alert', wagtail.core.blocks.StructBlock((('alert_text', wagtail.core.blocks.TextBlock()), ('alert_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('alert-primary', 'DEFAULT'), ('alert-secondary', 'GREY'), ('alert-success', 'GREEN'), ('alert-danger', 'RED'), ('alert-warning', 'ORANGE'), ('alert-dark', 'DARK'), ('alert-light', 'LIGHT')], help_text='select a background color', required=False)), ('is_link', wagtail.core.blocks.BooleanBlock(required=False)), ('alert_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_button', wagtail.core.blocks.StructBlock((('button_text', wagtail.core.blocks.TextBlock()), ('button_link', wagtail.core.blocks.TextBlock()), ('button_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'DEFAULT'), ('btn-lg', 'LARGE'), ('btn-sm', 'SMALL')], help_text='select a button size', 
required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_card', wagtail.core.blocks.StructBlock((('card_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='18 works best for card', required=False)), ('is_card_img', wagtail.core.blocks.BooleanBlock(required=False)), ('card_img', wagtail.images.blocks.ImageChooserBlock(required=False)), ('card_img_width', wagtail.core.blocks.IntegerBlock(help_text='provide an image width', required=False)), ('card_img_height', wagtail.core.blocks.IntegerBlock(help_text='provide an image height', required=False)), ('card_title', wagtail.core.blocks.TextBlock(blank=True, null=True, required=False)), ('card_text', wagtail.core.blocks.RichTextBlock(blank=True, null=True, required=False)), ('card_bg_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('card_text_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('text-primary', 'DEFAULT'), ('text-secondary', 'GREY'), ('text-success', 'GREEN'), ('text-danger', 'RED'), ('text-warning', 'ORANGE'), ('text-dark', 'DARK'), ('text-light', 'LIGHT')], help_text='select a text color', required=False)), ('btn_text', wagtail.core.blocks.TextBlock(required=False)), ('btn_color', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('btn-primary', 'DEFAULT'), ('btn-danger', 'RED'), ('btn-secondary', 'GREY'), ('btn-success', 'GREEN'), ('btn-warning', 'ORANGE')], help_text='select a button color', required=False)), ('btn_link', wagtail.core.blocks.TextBlock(required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class 
names separated by space and styling the class in css', required=False))))), ('bootstrap_carousel', wagtail.core.blocks.StructBlock((('c_image1', wagtail.images.blocks.ImageChooserBlock(required=True)), ('c_image1_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 1', required=False)), ('c_image1_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 1', required=False)), ('c_image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image2_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 2', required=False)), ('c_image2_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 2', required=False)), ('c_image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image3_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 3', required=False)), ('c_image3_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 3', required=False)), ('c_image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image4_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 4', required=False)), ('c_image4_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 4', required=False)), ('c_image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('c_image5_title', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a title for image 5', required=False)), ('c_image5_body', wagtail.core.blocks.TextBlock(blank=True, help_text='Give a body for image 5', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_well', wagtail.core.blocks.StructBlock((('message', wagtail.core.blocks.RichTextBlock(help_text='Enter some message inside well')), ('well_bg_color', 
wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('bg-primary', 'DEFAULT'), ('bg-secondary', 'GREY'), ('bg-success', 'GREEN'), ('bg-danger', 'RED'), ('bg-warning', 'ORANGE'), ('bg-dark', 'DARK'), ('bg-light', 'LIGHT')], help_text='select a background color', required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('horizontal_rule', wagtail.core.blocks.StructBlock((('thickness', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter a thickness integer value. Eg(10)', required=False)), ('bg_color', wagtail.core.blocks.TextBlock(help_text='Enter a hexcode color for the rule Eg(#000000)', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('bootstrap_media_object', wagtail.core.blocks.StructBlock((('media_img', wagtail.images.blocks.ImageChooserBlock(required=True)), ('media_img_alt', wagtail.core.blocks.TextBlock(required=True)), ('media_img_width', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image width as an integer value. 
Eg(50)', required=False)), ('media_img_height', wagtail.core.blocks.IntegerBlock(blank=True, help_text='Enter an image height as an integer value Eg(50)', required=False)), ('heading_text', wagtail.core.blocks.TextBlock(blank=True, help_text='enter some heading text for media object', required=False)), ('body_text', wagtail.core.blocks.RichTextBlock(help_text='Enter some message for the media object', required=True)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False))))), ('placeholder_block', wagtail.core.blocks.StructBlock((('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)),))), ('font_awesome_icon_block', wagtail.core.blocks.StructBlock((('icon_tag', wagtail.core.blocks.TextBlock(blank=False, help_text='Provide a font awesome icon class text', required=True)), ('icon_size', wagtail.core.blocks.IntegerBlock(blank=True, default=2, help_text='Provide a icon size in number type', null=True, required=False)), ('custom_class', wagtail.core.blocks.TextBlock(blank=True, help_text='control this element by giving unique class names separated by space and styling the class in css', required=False)))))), blank=True, null=True, verbose_name='Row Content'),
),
]
| 1,209.022222
| 10,687
| 0.748649
| 7,498
| 54,406
| 5.331422
| 0.023606
| 0.113371
| 0.172658
| 0.126829
| 0.988718
| 0.988718
| 0.987692
| 0.987692
| 0.987692
| 0.987692
| 0
| 0.005427
| 0.075414
| 54,406
| 44
| 10,688
| 1,236.5
| 0.789257
| 0.00125
| 0
| 0.459459
| 1
| 0
| 0.378007
| 0.00565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.162162
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d9fa77e1de6fa0b98c9f47e60239643b04a3a37f
| 7,678
|
py
|
Python
|
python/test/raw_extractor_test.py
|
fishjam/vmaf
|
030ded43ea77eb6e16b5bb69ef937b1600494b6c
|
[
"Apache-2.0"
] | null | null | null |
python/test/raw_extractor_test.py
|
fishjam/vmaf
|
030ded43ea77eb6e16b5bb69ef937b1600494b6c
|
[
"Apache-2.0"
] | null | null | null |
python/test/raw_extractor_test.py
|
fishjam/vmaf
|
030ded43ea77eb6e16b5bb69ef937b1600494b6c
|
[
"Apache-2.0"
] | 2
|
2020-07-10T03:03:24.000Z
|
2020-07-10T03:33:10.000Z
|
from vmaf.core.executor import run_executors_in_parallel
__copyright__ = "Copyright 2016-2019, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import unittest
import os
import numpy as np
from vmaf.config import VmafConfig
from vmaf.core.asset import Asset
from vmaf.core.raw_extractor import AssetExtractor, DisYUVRawVideoExtractor
class RawExtractorTest(unittest.TestCase):
    """Tests for AssetExtractor.

    Verifies that running the extractor over two assets — one distorted
    pair and one reference-vs-reference "original" pair — yields the
    expected asset string representations, both serially and in parallel.
    """

    def test_run_asset_extractor(self):
        # NOTE: converted from a Python 2 print statement; print() works on
        # both Python 2 and 3.
        print('test on running asset extractor...')

        ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")

        # Distorted asset: reference vs. distorted clip.
        asset = Asset(dataset="test", content_id=0, asset_id=1,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path,
                      dis_path=dis_path,
                      asset_dict={'width': 576, 'height': 324,
                                  'quality_width': 160, 'quality_height': 90})

        # "Original" asset: reference compared against itself.
        asset_original = Asset(dataset="test", content_id=0, asset_id=2,
                               workdir_root=VmafConfig.workdir_path(),
                               ref_path=ref_path,
                               dis_path=ref_path,
                               asset_dict={'width': 576, 'height': 324,
                                           'quality_width': 160, 'quality_height': 90})

        self.fextractor = AssetExtractor(
            [asset, asset_original], None, fifo_mode=True)

        # Serial run: results must echo back the asset descriptions.
        self.fextractor.run()
        results = self.fextractor.results
        self.assertEqual(str(results[0]['asset']), 'test_0_1_src01_hrc00_576x324_576x324_vs_src01_hrc01_576x324_576x324_q_160x90')
        self.assertEqual(str(results[1]['asset']), 'test_0_2_src01_hrc00_576x324_576x324_vs_src01_hrc00_576x324_576x324_q_160x90')

        # Parallel run: identical expectations.
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        self.assertEqual(str(results[0]['asset']), 'test_0_1_src01_hrc00_576x324_576x324_vs_src01_hrc01_576x324_576x324_q_160x90')
        self.assertEqual(str(results[1]['asset']), 'test_0_2_src01_hrc00_576x324_576x324_vs_src01_hrc00_576x324_576x324_q_160x90')
class DisYUVRawVideoExtractorTest(unittest.TestCase):
    """Tests for DisYUVRawVideoExtractor.

    Checks mean pixel values of the extracted Y and U channels of the
    distorted videos (V is not requested, so accessing it must raise
    KeyError), and that parallel execution is rejected because the h5py
    file handle cannot be pickled.
    """

    def setUp(self):
        self.h5py_filepath = VmafConfig.workdir_path('test.hdf5')

    def tearDown(self):
        # Remove the scratch HDF5 file if the test created it.
        if os.path.exists(self.h5py_filepath):
            os.remove(self.h5py_filepath)

    def test_run_dis_yuv_raw_video_extractor(self):
        # NOTE: converted from a Python 2 print statement.
        print('test on running dis YUV raw video extractor...')

        ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=1,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path,
                      dis_path=dis_path,
                      asset_dict={'width': 576, 'height': 324})

        asset_original = Asset(dataset="test", content_id=0, asset_id=2,
                               workdir_root=VmafConfig.workdir_path(),
                               ref_path=ref_path,
                               dis_path=ref_path,
                               asset_dict={'width': 576, 'height': 324})

        h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
        self.fextractor = DisYUVRawVideoExtractor(
            [asset, asset_original], None, fifo_mode=False,
            optional_dict={'channels': 'yu'},  # only Y and U channels extracted
            optional_dict2={'h5py_file': h5py_file}
        )
        self.fextractor.run()
        results = self.fextractor.results

        # Fixed assertAlmostEquals (deprecated alias, removed in Python 3.12)
        # to assertAlmostEqual.
        self.assertAlmostEqual(np.mean(results[0]['dis_y']), 61.332006579182384, places=4)
        self.assertAlmostEqual(np.mean(results[1]['dis_y']), 59.788567297525148, places=4)
        self.assertAlmostEqual(np.mean(results[0]['dis_u']), 115.23227407335962, places=4)
        self.assertAlmostEqual(np.mean(results[1]['dis_u']), 114.49701717535437, places=4)

        # V channel was not requested, so it must be absent from the result.
        with self.assertRaises(KeyError):
            np.mean(results[0]['dis_v'])

        DisYUVRawVideoExtractor.close_h5py_file(h5py_file)

    def test_run_dis_yuv_raw_video_extractor_parallel(self):
        # NOTE: converted from a Python 2 print statement.
        print('test on running dis YUV raw video extractor...')

        ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=1,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path,
                      dis_path=dis_path,
                      asset_dict={'width': 576, 'height': 324})

        asset_original = Asset(dataset="test", content_id=0, asset_id=2,
                               workdir_root=VmafConfig.workdir_path(),
                               ref_path=ref_path,
                               dis_path=ref_path,
                               asset_dict={'width': 576, 'height': 324})

        h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
        self.fextractor = DisYUVRawVideoExtractor(
            [asset, asset_original], None, fifo_mode=False,
            optional_dict={'channels': 'yu'},
            optional_dict2={'h5py_file': h5py_file}
        )

        # Parallel execution must be rejected: the h5py file handle cannot
        # be pickled across processes.
        with self.assertRaises(AssertionError):
            self.fextractor.run(parallelize=True)

        DisYUVRawVideoExtractor.close_h5py_file(h5py_file)
class ParallelDisYRawVideoExtractorTest(unittest.TestCase):
    """Tests for running DisYUVRawVideoExtractor with parallelization disabled.

    Parallel execution is not possible because the h5py file handle cannot
    be pickled, so the extractor is run serially and the same channel-mean
    expectations as DisYUVRawVideoExtractorTest are checked.
    """

    def setUp(self):
        self.h5py_filepath = VmafConfig.workdir_path('test.hdf5')

    def tearDown(self):
        # Remove the scratch HDF5 file if the test created it.
        if os.path.exists(self.h5py_filepath):
            os.remove(self.h5py_filepath)

    def test_run_parallel_dis_y_fextractor(self):
        # NOTE: converted from a Python 2 print statement.
        print('test on running dis YUV raw video extractor in parallel (disabled)...')

        ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=1,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path,
                      dis_path=dis_path,
                      asset_dict={'width': 576, 'height': 324})

        asset_original = Asset(dataset="test", content_id=0, asset_id=2,
                               workdir_root=VmafConfig.workdir_path(),
                               ref_path=ref_path,
                               dis_path=ref_path,
                               asset_dict={'width': 576, 'height': 324})

        h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
        optional_dict2 = {'h5py_file': h5py_file}

        fextractor = DisYUVRawVideoExtractor(
            [asset, asset_original],
            None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict={'channels': 'yu'},
            optional_dict2=optional_dict2
        )
        self.fextractors = [fextractor]
        fextractor.run(parallelize=False)  # Can't run parallel: can't pickle FileID objects
        results = fextractor.results

        # Fixed assertAlmostEquals (deprecated alias, removed in Python 3.12)
        # to assertAlmostEqual.
        self.assertAlmostEqual(np.mean(results[0]['dis_y']), 61.332006579182384, places=4)
        self.assertAlmostEqual(np.mean(results[1]['dis_y']), 59.788567297525148, places=4)
        self.assertAlmostEqual(np.mean(results[0]['dis_u']), 115.23227407335962, places=4)
        self.assertAlmostEqual(np.mean(results[1]['dis_u']), 114.49701717535437, places=4)

        # V channel was not requested, so it must be absent from the result.
        with self.assertRaises(KeyError):
            np.mean(results[0]['dis_v'])

        DisYUVRawVideoExtractor.close_h5py_file(h5py_file)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 41.956284
| 130
| 0.640401
| 892
| 7,678
| 5.202915
| 0.142377
| 0.036199
| 0.047404
| 0.044818
| 0.855419
| 0.841629
| 0.811679
| 0.803491
| 0.768153
| 0.768153
| 0
| 0.089105
| 0.251628
| 7,678
| 182
| 131
| 42.186813
| 0.718587
| 0.006121
| 0
| 0.737226
| 0
| 0
| 0.142483
| 0.063966
| 0
| 0
| 0
| 0
| 0.109489
| 0
| null | null | 0
| 0.051095
| null | null | 0.029197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8a0ced62c9cc734696b15fe8bffdab8d4191d0c2
| 9,362
|
py
|
Python
|
tests/pixie/utils_test/test_scale_to_double.py
|
bidfx/bidfx-api-py
|
6b5e2c5efaa547b2d97a5556ef8d21d1de807f68
|
[
"Apache-2.0"
] | 3
|
2020-04-29T09:19:56.000Z
|
2021-03-08T11:12:05.000Z
|
tests/pixie/utils_test/test_scale_to_double.py
|
bidfx/bidfx-api-py
|
6b5e2c5efaa547b2d97a5556ef8d21d1de807f68
|
[
"Apache-2.0"
] | 3
|
2020-03-08T21:54:02.000Z
|
2021-02-02T22:33:51.000Z
|
tests/pixie/utils_test/test_scale_to_double.py
|
bidfx/bidfx-api-py
|
6b5e2c5efaa547b2d97a5556ef8d21d1de807f68
|
[
"Apache-2.0"
] | 2
|
2020-06-13T10:52:18.000Z
|
2022-03-02T17:29:45.000Z
|
import unittest
from bidfx.pricing._pixie.util.buffer_reads import scale_to_double
class TestScaleToDouble(unittest.TestCase):
    """Verify that scale_to_double renders value * 10**-scale as a decimal string.

    The original test repeated one assertEqual per (value, scale) pair —
    the same ten magnitudes at each of six scales, plus a mirrored negative
    assertion for every pair.  The cases are now table-driven and run under
    subTest, so a single failure no longer hides the remaining cases.

    Negative inputs mirror the positive ones: the expected string simply
    gains a leading '-', except for zero (-0 == 0), which stays "0.0".
    """

    # The same ten magnitudes are exercised at every scale.
    VALUES = (
        1234567890,
        1234567890125,
        1234567890123456,
        1234567890250000,
        1234567890250002,
        1234567890250020,
        0,
        5,
        20,
        250,
    )

    # scale -> expected strings, parallel to VALUES.
    EXPECTED = {
        0: (
            "1234567890.0", "1234567890125.0", "1234567890123456.0",
            "1234567890250000.0", "1234567890250002.0", "1234567890250020.0",
            "0.0", "5.0", "20.0", "250.0",
        ),
        1: (
            "123456789.0", "123456789012.5", "123456789012345.6",
            "123456789025000.0", "123456789025000.2", "123456789025002.0",
            "0.0", "0.5", "2.0", "25.0",
        ),
        2: (
            "12345678.9", "12345678901.25", "12345678901234.56",
            "12345678902500.0", "12345678902500.02", "12345678902500.2",
            "0.0", "0.05", "0.2", "2.5",
        ),
        3: (
            "1234567.89", "1234567890.125", "1234567890123.456",
            "1234567890250.0", "1234567890250.002", "1234567890250.02",
            "0.0", "0.005", "0.02", "0.25",
        ),
        6: (
            "1234.56789", "1234567.890125", "1234567890.123456",
            "1234567890.25", "1234567890.250002", "1234567890.25002",
            "0.0", "0.000005", "0.00002", "0.00025",
        ),
        12: (
            "0.00123456789", "1.234567890125", "1234.567890123456",
            "1234.56789025", "1234.567890250002", "1234.56789025002",
            "0.0", "0.000000000005", "0.00000000002", "0.00000000025",
        ),
    }

    def test_scale_to_double(self):
        for scale, expectations in self.EXPECTED.items():
            for value, expected in zip(self.VALUES, expectations):
                with self.subTest(value=value, scale=scale):
                    self.assertEqual(expected, scale_to_double(value, scale))
                # -0 == 0 in Python, so zero keeps its unsigned rendering.
                negative_expected = expected if value == 0 else "-" + expected
                with self.subTest(value=-value, scale=scale):
                    self.assertEqual(
                        negative_expected, scale_to_double(-value, scale)
                    )
| 64.123288
| 86
| 0.698355
| 1,157
| 9,362
| 5.426966
| 0.07433
| 0.142698
| 0.26501
| 0.098105
| 0.932314
| 0.932314
| 0.932314
| 0.932314
| 0.932314
| 0.932314
| 0
| 0.326155
| 0.151784
| 9,362
| 145
| 87
| 64.565517
| 0.464551
| 0.042192
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0.967742
| 1
| 0.008065
| false
| 0
| 0.016129
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8a662752a9dfd3b14b6f496d56f8ac747f108a81
| 68
|
py
|
Python
|
onehalfpiece/main.py
|
ryanzhang/onehalfpiece
|
043d1a4b5659d9c709db23debb6c0193a5acf6d7
|
[
"MIT"
] | 3
|
2016-04-21T01:49:15.000Z
|
2019-02-25T13:16:41.000Z
|
onehalfpiece/main.py
|
ryanzhang/onehalfpiece
|
043d1a4b5659d9c709db23debb6c0193a5acf6d7
|
[
"MIT"
] | null | null | null |
onehalfpiece/main.py
|
ryanzhang/onehalfpiece
|
043d1a4b5659d9c709db23debb6c0193a5acf6d7
|
[
"MIT"
] | 1
|
2022-01-27T04:19:31.000Z
|
2022-01-27T04:19:31.000Z
|
#!/usr/bin/env python
def hello_world():
    """Return the canonical greeting string."""
    greeting = 'Hello World!'
    return greeting
| 13.6
| 25
| 0.661765
| 10
| 68
| 4.4
| 0.8
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 68
| 4
| 26
| 17
| 0.785714
| 0.294118
| 0
| 0
| 0
| 0
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
8a73ae404d0807fd5c19001edb6b7ca5fbbb6d92
| 142,615
|
py
|
Python
|
models/pdeep2/model/modification.py
|
Shui-Group/TargetDIA
|
d697390728793c20dfe4426cce250c72b42cb0d2
|
[
"BSD-3-Clause"
] | 1
|
2020-11-11T03:28:39.000Z
|
2020-11-11T03:28:39.000Z
|
models/pdeep2/model/modification.py
|
Shui-Group/TargetDIA
|
d697390728793c20dfe4426cce250c72b42cb0d2
|
[
"BSD-3-Clause"
] | null | null | null |
models/pdeep2/model/modification.py
|
Shui-Group/TargetDIA
|
d697390728793c20dfe4426cce250c72b42cb0d2
|
[
"BSD-3-Clause"
] | null | null | null |
def get_modification():
mod_dict = dict() # {mod_name: elements}
mod_dict['2-dimethylsuccinyl[C]'] = 'C NORMAL 144.042259 144.042259 0 H(8)C(6)O(4)'
mod_dict['2-monomethylsuccinyl[C]'] = 'C NORMAL 130.026609 130.026609 0 H(6)C(5)O(4)'
mod_dict['2-nitrobenzyl[Y]'] = 'Y NORMAL 135.032028 135.032028 0 H(5)C(7)N(1)O(2)'
mod_dict['2-succinyl[C]'] = 'C NORMAL 116.010959 116.010959 0 H(4)C(4)O(4)'
mod_dict['2HPG[R]'] = 'R NORMAL 282.052824 282.052824 0 H(10)C(16)O(5)'
mod_dict['3-deoxyglucosone[R]'] = 'R NORMAL 144.042259 144.042259 0 H(8)C(6)O(4)'
mod_dict['3-phosphoglyceryl[K]'] = 'K NORMAL 167.982375 167.982375 0 H(5)C(3)O(6)P(1)'
mod_dict['3sulfo[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 183.983029 183.983029 0 H(4)C(7)O(4)S(1)'
mod_dict['4-ONE+Delta_H(-2)O(-1)[C]'] = 'C NORMAL 136.088815 136.088815 0 H(12)C(9)O(1)'
mod_dict['4-ONE+Delta_H(-2)O(-1)[H]'] = 'H NORMAL 136.088815 136.088815 0 H(12)C(9)O(1)'
mod_dict['4-ONE+Delta_H(-2)O(-1)[K]'] = 'K NORMAL 136.088815 136.088815 0 H(12)C(9)O(1)'
mod_dict['4-ONE[C]'] = 'C NORMAL 154.099380 154.099380 0 H(14)C(9)O(2)'
mod_dict['4-ONE[H]'] = 'H NORMAL 154.099380 154.099380 0 H(14)C(9)O(2)'
mod_dict['4-ONE[K]'] = 'K NORMAL 154.099380 154.099380 0 H(14)C(9)O(2)'
mod_dict['4AcAllylGal[C]'] = 'C NORMAL 372.142033 372.142033 0 H(24)C(17)O(9)'
mod_dict['ADP-Ribosyl[C]'] = 'C NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[D]'] = 'D NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[E]'] = 'E NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[K]'] = 'K NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[N]'] = 'N NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[R]'] = 'R NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['ADP-Ribosyl[S]'] = 'S NORMAL 541.061110 541.061110 0 H(21)C(15)N(5)O(13)P(2)'
mod_dict['AEBS[H]'] = 'H NORMAL 183.035399 183.035399 0 H(9)C(8)N(1)O(2)S(1)'
mod_dict['AEBS[K]'] = 'K NORMAL 183.035399 183.035399 0 H(9)C(8)N(1)O(2)S(1)'
mod_dict['AEBS[S]'] = 'S NORMAL 183.035399 183.035399 0 H(9)C(8)N(1)O(2)S(1)'
mod_dict['AEBS[Y]'] = 'Y NORMAL 183.035399 183.035399 0 H(9)C(8)N(1)O(2)S(1)'
mod_dict['AEBS[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 183.035399 183.035399 0 H(9)C(8)N(1)O(2)S(1)'
mod_dict['AEC-MAEC[S]'] = 'S NORMAL 59.019355 59.019355 0 H(5)C(2)N(1)O(-1)S(1)'
mod_dict['AEC-MAEC[T]'] = 'T NORMAL 59.019355 59.019355 0 H(5)C(2)N(1)O(-1)S(1)'
mod_dict['AEC-MAEC_2H(4)[S]'] = 'S NORMAL 63.044462 63.044462 0 H(1)2H(4)C(2)N(1)O(-1)S(1)'
mod_dict['AEC-MAEC_2H(4)[T]'] = 'T NORMAL 63.044462 63.044462 0 H(1)2H(4)C(2)N(1)O(-1)S(1)'
mod_dict['AHA-Alkyne-KDDDD[M]'] = 'M NORMAL 695.280074 695.280074 0 H(37)C(26)N(11)O(14)S(-1)'
mod_dict['AHA-Alkyne[M]'] = 'M NORMAL 107.077339 107.077339 0 H(5)C(4)N(5)O(1)S(-1)'
mod_dict['AHA-SS[M]'] = 'M NORMAL 195.075625 195.075625 0 H(9)C(7)N(5)O(2)'
mod_dict['AHA-SS_CAM[M]'] = 'M NORMAL 252.097088 252.097088 0 H(12)C(9)N(6)O(3)'
mod_dict['AMTzHexNAc2[N]'] = 'N NORMAL 502.202341 502.202341 0 H(30)C(19)N(6)O(10)'
mod_dict['AMTzHexNAc2[S]'] = 'S NORMAL 502.202341 502.202341 0 H(30)C(19)N(6)O(10)'
mod_dict['AMTzHexNAc2[T]'] = 'T NORMAL 502.202341 502.202341 0 H(30)C(19)N(6)O(10)'
mod_dict['AROD[C]'] = 'C NORMAL 820.336015 820.336015 0 H(52)C(35)N(10)O(9)S(2)'
mod_dict['AccQTag[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 170.048013 170.048013 0 H(6)C(10)N(2)O(1)'
mod_dict['AccQTag[K]'] = 'K NORMAL 170.048013 170.048013 0 H(6)C(10)N(2)O(1)'
mod_dict['Acetyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[C]'] = 'C NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[H]'] = 'H NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[K]'] = 'K NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[S]'] = 'S NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[T]'] = 'T NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[Y]'] = 'Y NORMAL 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 42.010565 42.010565 0 H(2)C(2)O(1)'
mod_dict['Acetyl_13C(2)[K]'] = 'K NORMAL 44.017274 44.017274 0 H(2)13C(2)O(1)'
mod_dict['Acetyl_13C(2)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 44.017274 44.017274 0 H(2)13C(2)O(1)'
mod_dict['Acetyl_2H(3)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[H]'] = 'H NORMAL 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[K]'] = 'K NORMAL 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[S]'] = 'S NORMAL 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[T]'] = 'T NORMAL 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[Y]'] = 'Y NORMAL 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyl_2H(3)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 45.029395 45.029395 0 H(-1)2H(3)C(2)O(1)'
mod_dict['Acetyldeoxyhypusine[K]'] = 'K NORMAL 97.089149 97.089149 0 H(11)C(6)N(1)'
mod_dict['Acetylhypusine[K]'] = 'K NORMAL 113.084064 113.084064 0 H(11)C(6)N(1)O(1)'
mod_dict['Ahx2+Hsl[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 309.205242 309.205242 0 H(27)C(16)N(3)O(3)'
mod_dict['Ala->Arg[A]'] = 'A NORMAL 85.063997 85.063997 0 H(7)C(3)N(3)'
mod_dict['Ala->Asn[A]'] = 'A NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Ala->Asp[A]'] = 'A NORMAL 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Ala->Cys[A]'] = 'A NORMAL 31.972071 31.972071 0 S(1)'
mod_dict['Ala->Gln[A]'] = 'A NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Ala->Glu[A]'] = 'A NORMAL 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['Ala->Gly[A]'] = 'A NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Ala->His[A]'] = 'A NORMAL 66.021798 66.021798 0 H(2)C(3)N(2)'
mod_dict['Ala->Lys[A]'] = 'A NORMAL 57.057849 57.057849 0 H(7)C(3)N(1)'
mod_dict['Ala->Met[A]'] = 'A NORMAL 60.003371 60.003371 0 H(4)C(2)S(1)'
mod_dict['Ala->Phe[A]'] = 'A NORMAL 76.031300 76.031300 0 H(4)C(6)'
mod_dict['Ala->Pro[A]'] = 'A NORMAL 26.015650 26.015650 0 H(2)C(2)'
mod_dict['Ala->Ser[A]'] = 'A NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Ala->Thr[A]'] = 'A NORMAL 30.010565 30.010565 0 H(2)C(1)O(1)'
mod_dict['Ala->Trp[A]'] = 'A NORMAL 115.042199 115.042199 0 H(5)C(8)N(1)'
mod_dict['Ala->Tyr[A]'] = 'A NORMAL 92.026215 92.026215 0 H(4)C(6)O(1)'
mod_dict['Ala->Val[A]'] = 'A NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Ala->Xle[A]'] = 'A NORMAL 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Amidated[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C -0.984016 -0.984016 0 H(1)N(1)O(-1)'
mod_dict['Amidated[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C -0.984016 -0.984016 0 H(1)N(1)O(-1)'
mod_dict['Amidine[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 41.026549 41.026549 0 H(3)C(2)N(1)'
mod_dict['Amidine[K]'] = 'K NORMAL 41.026549 41.026549 0 H(3)C(2)N(1)'
mod_dict['Amidino[C]'] = 'C NORMAL 42.021798 42.021798 0 H(2)C(1)N(2)'
mod_dict['Amino[Y]'] = 'Y NORMAL 15.010899 15.010899 0 H(1)N(1)'
mod_dict['Ammonia-loss[AnyN-termC]'] = 'C PEP_N -17.026549 -17.026549 0 H(-3)N(-1)'
mod_dict['Ammonia-loss[N]'] = 'N NORMAL -17.026549 -17.026549 0 H(-3)N(-1)'
mod_dict['Ammonia-loss[ProteinN-termS]'] = 'S PRO_N -17.026549 -17.026549 0 H(-3)N(-1)'
mod_dict['Ammonia-loss[ProteinN-termT]'] = 'T PRO_N -17.026549 -17.026549 0 H(-3)N(-1)'
mod_dict['Ammonium[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 17.026549 17.026549 0 H(3)N(1)'
mod_dict['Ammonium[D]'] = 'D NORMAL 17.026549 17.026549 0 H(3)N(1)'
mod_dict['Ammonium[E]'] = 'E NORMAL 17.026549 17.026549 0 H(3)N(1)'
mod_dict['Archaeol[C]'] = 'C NORMAL 634.662782 634.662782 0 H(86)C(43)O(2)'
mod_dict['Arg->Ala[R]'] = 'R NORMAL -85.063997 -85.063997 0 H(-7)C(-3)N(-3)'
mod_dict['Arg->Asn[R]'] = 'R NORMAL -42.058184 -42.058184 0 H(-6)C(-2)N(-2)O(1)'
mod_dict['Arg->Asp[R]'] = 'R NORMAL -41.074168 -41.074168 0 H(-7)C(-2)N(-3)O(2)'
mod_dict['Arg->Cys[R]'] = 'R NORMAL -53.091927 -53.091927 0 H(-7)C(-3)N(-3)S(1)'
mod_dict['Arg->Gln[R]'] = 'R NORMAL -28.042534 -28.042534 0 H(-4)C(-1)N(-2)O(1)'
mod_dict['Arg->Glu[R]'] = 'R NORMAL -27.058518 -27.058518 0 H(-5)C(-1)N(-3)O(2)'
mod_dict['Arg->GluSA[R]'] = 'R NORMAL -43.053433 -43.053433 0 H(-5)C(-1)N(-3)O(1)'
mod_dict['Arg->Gly[R]'] = 'R NORMAL -99.079647 -99.079647 0 H(-9)C(-4)N(-3)'
mod_dict['Arg->His[R]'] = 'R NORMAL -19.042199 -19.042199 0 H(-5)N(-1)'
mod_dict['Arg->Lys[R]'] = 'R NORMAL -28.006148 -28.006148 0 N(-2)'
mod_dict['Arg->Met[R]'] = 'R NORMAL -25.060626 -25.060626 0 H(-3)C(-1)N(-3)S(1)'
mod_dict['Arg->Npo[R]'] = 'R NORMAL 80.985078 80.985078 0 H(-1)C(3)N(1)O(2)'
mod_dict['Arg->Orn[R]'] = 'R NORMAL -42.021798 -42.021798 0 H(-2)C(-1)N(-2)'
mod_dict['Arg->Phe[R]'] = 'R NORMAL -9.032697 -9.032697 0 H(-3)C(3)N(-3)'
mod_dict['Arg->Pro[R]'] = 'R NORMAL -59.048347 -59.048347 0 H(-5)C(-1)N(-3)'
mod_dict['Arg->Ser[R]'] = 'R NORMAL -69.069083 -69.069083 0 H(-7)C(-3)N(-3)O(1)'
mod_dict['Arg->Thr[R]'] = 'R NORMAL -55.053433 -55.053433 0 H(-5)C(-2)N(-3)O(1)'
mod_dict['Arg->Trp[R]'] = 'R NORMAL 29.978202 29.978202 0 H(-2)C(5)N(-2)'
mod_dict['Arg->Tyr[R]'] = 'R NORMAL 6.962218 6.962218 0 H(-3)C(3)N(-3)O(1)'
mod_dict['Arg->Val[R]'] = 'R NORMAL -57.032697 -57.032697 0 H(-3)C(-1)N(-3)'
mod_dict['Arg->Xle[R]'] = 'R NORMAL -43.017047 -43.017047 0 H(-1)N(-3)'
mod_dict['Arg-loss[AnyC-termR]'] = 'R PEP_C -156.101111 -156.101111 0 H(-12)C(-6)N(-4)O(-1)'
mod_dict['Arg[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 156.101111 156.101111 0 H(12)C(6)N(4)O(1)'
mod_dict['Arg2PG[R]'] = 'R NORMAL 266.057909 266.057909 0 H(10)C(16)O(4)'
mod_dict['Argbiotinhydrazide[R]'] = 'R NORMAL 199.066699 199.066699 0 H(13)C(9)N(1)O(2)S(1)'
mod_dict['Asn->Ala[N]'] = 'N NORMAL -43.005814 -43.005814 0 H(-1)C(-1)N(-1)O(-1)'
mod_dict['Asn->Arg[N]'] = 'N NORMAL 42.058184 42.058184 0 H(6)C(2)N(2)O(-1)'
mod_dict['Asn->Cys[N]'] = 'N NORMAL -11.033743 -11.033743 0 H(-1)C(-1)N(-1)O(-1)S(1)'
mod_dict['Asn->Gly[N]'] = 'N NORMAL -57.021464 -57.021464 0 H(-3)C(-2)N(-1)O(-1)'
mod_dict['Asn->His[N]'] = 'N NORMAL 23.015984 23.015984 0 H(1)C(2)N(1)O(-1)'
mod_dict['Asn->Lys[N]'] = 'N NORMAL 14.052036 14.052036 0 H(6)C(2)O(-1)'
mod_dict['Asn->Met[N]'] = 'N NORMAL 16.997557 16.997557 0 H(3)C(1)N(-1)O(-1)S(1)'
mod_dict['Asn->Phe[N]'] = 'N NORMAL 33.025486 33.025486 0 H(3)C(5)N(-1)O(-1)'
mod_dict['Asn->Pro[N]'] = 'N NORMAL -16.990164 -16.990164 0 H(1)C(1)N(-1)O(-1)'
mod_dict['Asn->Ser[N]'] = 'N NORMAL -27.010899 -27.010899 0 H(-1)C(-1)N(-1)'
mod_dict['Asn->Thr[N]'] = 'N NORMAL -12.995249 -12.995249 0 H(1)N(-1)'
mod_dict['Asn->Trp[N]'] = 'N NORMAL 72.036386 72.036386 0 H(4)C(7)O(-1)'
mod_dict['Asn->Tyr[N]'] = 'N NORMAL 49.020401 49.020401 0 H(3)C(5)N(-1)'
mod_dict['Asn->Val[N]'] = 'N NORMAL -14.974514 -14.974514 0 H(3)C(1)N(-1)O(-1)'
mod_dict['Asn->Xle[N]'] = 'N NORMAL -0.958863 -0.958863 0 H(5)C(2)N(-1)O(-1)'
mod_dict['Asp->Ala[D]'] = 'D NORMAL -43.989829 -43.989829 0 C(-1)O(-2)'
mod_dict['Asp->Arg[D]'] = 'D NORMAL 41.074168 41.074168 0 H(7)C(2)N(3)O(-2)'
mod_dict['Asp->Asn[D]'] = 'D NORMAL -0.984016 -0.984016 0 H(1)N(1)O(-1)'
mod_dict['Asp->Cys[D]'] = 'D NORMAL -12.017759 -12.017759 0 C(-1)O(-2)S(1)'
mod_dict['Asp->Gln[D]'] = 'D NORMAL 13.031634 13.031634 0 H(3)C(1)N(1)O(-1)'
mod_dict['Asp->Gly[D]'] = 'D NORMAL -58.005479 -58.005479 0 H(-2)C(-2)O(-2)'
mod_dict['Asp->His[D]'] = 'D NORMAL 22.031969 22.031969 0 H(2)C(2)N(2)O(-2)'
mod_dict['Asp->Lys[D]'] = 'D NORMAL 13.068020 13.068020 0 H(7)C(2)N(1)O(-2)'
mod_dict['Asp->Met[D]'] = 'D NORMAL 16.013542 16.013542 0 H(4)C(1)O(-2)S(1)'
mod_dict['Asp->Phe[D]'] = 'D NORMAL 32.041471 32.041471 0 H(4)C(5)O(-2)'
mod_dict['Asp->Pro[D]'] = 'D NORMAL -17.974179 -17.974179 0 H(2)C(1)O(-2)'
mod_dict['Asp->Ser[D]'] = 'D NORMAL -27.994915 -27.994915 0 C(-1)O(-1)'
mod_dict['Asp->Thr[D]'] = 'D NORMAL -13.979265 -13.979265 0 H(2)O(-1)'
mod_dict['Asp->Trp[D]'] = 'D NORMAL 71.052370 71.052370 0 H(5)C(7)N(1)O(-2)'
mod_dict['Asp->Tyr[D]'] = 'D NORMAL 48.036386 48.036386 0 H(4)C(5)O(-1)'
mod_dict['Asp->Val[D]'] = 'D NORMAL -15.958529 -15.958529 0 H(4)C(1)O(-2)'
mod_dict['Asp->Xle[D]'] = 'D NORMAL -1.942879 -1.942879 0 H(6)C(2)O(-2)'
mod_dict['Atto495Maleimide[C]'] = 'C NORMAL 474.250515 474.250515 0 H(32)C(27)N(5)O(3)'
mod_dict['BADGE[C]'] = 'C NORMAL 340.167459 340.167459 0 H(24)C(21)O(4)'
mod_dict['BDMAPP[H]'] = 'H NORMAL 253.010225 253.010225 0 H(12)C(11)N(1)O(1)Br(1)'
mod_dict['BDMAPP[K]'] = 'K NORMAL 253.010225 253.010225 0 H(12)C(11)N(1)O(1)Br(1)'
mod_dict['BDMAPP[W]'] = 'W NORMAL 253.010225 253.010225 0 H(12)C(11)N(1)O(1)Br(1)'
mod_dict['BDMAPP[Y]'] = 'Y NORMAL 253.010225 253.010225 0 H(12)C(11)N(1)O(1)Br(1)'
mod_dict['BDMAPP[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 253.010225 253.010225 0 H(12)C(11)N(1)O(1)Br(1)'
mod_dict['BHT[C]'] = 'C NORMAL 218.167065 218.167065 0 H(22)C(15)O(1)'
mod_dict['BHT[H]'] = 'H NORMAL 218.167065 218.167065 0 H(22)C(15)O(1)'
mod_dict['BHT[K]'] = 'K NORMAL 218.167065 218.167065 0 H(22)C(15)O(1)'
mod_dict['BHTOH[C]'] = 'C NORMAL 234.161980 234.161980 0 H(22)C(15)O(2)'
mod_dict['BHTOH[H]'] = 'H NORMAL 234.161980 234.161980 0 H(22)C(15)O(2)'
mod_dict['BHTOH[K]'] = 'K NORMAL 234.161980 234.161980 0 H(22)C(15)O(2)'
mod_dict['BITC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 149.029920 149.029920 0 H(7)C(8)N(1)S(1)'
mod_dict['BITC[C]'] = 'C NORMAL 149.029920 149.029920 0 H(7)C(8)N(1)S(1)'
mod_dict['BITC[K]'] = 'K NORMAL 149.029920 149.029920 0 H(7)C(8)N(1)S(1)'
mod_dict['BMOE[C]'] = 'C NORMAL 220.048407 220.048407 0 H(8)C(10)N(2)O(4)'
mod_dict['BMP-piperidinol[C]'] = 'C NORMAL 263.131014 263.131014 0 H(17)C(18)N(1)O(1)'
mod_dict['BMP-piperidinol[M]'] = 'M NORMAL 263.131014 263.131014 0 H(17)C(18)N(1)O(1)'
mod_dict['Bacillosamine[N]'] = 'N NORMAL 228.111007 228.111007 0 H(16)C(10)N(2)O(4)'
mod_dict['Benzoyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 104.026215 104.026215 0 H(4)C(7)O(1)'
mod_dict['Benzoyl[K]'] = 'K NORMAL 104.026215 104.026215 0 H(4)C(7)O(1)'
mod_dict['Biotin-HPDP[C]'] = 'C NORMAL 428.191582 428.191582 0 H(32)C(19)N(4)O(3)S(2)'
mod_dict['Biotin-PEG-PRA[M]'] = 'M NORMAL 578.317646 578.317646 0 H(42)C(26)N(8)O(7)'
mod_dict['Biotin-PEO-Amine[D]'] = 'D NORMAL 356.188212 356.188212 0 H(28)C(16)N(4)O(3)S(1)'
mod_dict['Biotin-PEO-Amine[E]'] = 'E NORMAL 356.188212 356.188212 0 H(28)C(16)N(4)O(3)S(1)'
mod_dict['Biotin-PEO-Amine[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 356.188212 356.188212 0 H(28)C(16)N(4)O(3)S(1)'
mod_dict['Biotin-phenacyl[C]'] = 'C NORMAL 626.263502 626.263502 0 H(38)C(29)N(8)O(6)S(1)'
mod_dict['Biotin-phenacyl[H]'] = 'H NORMAL 626.263502 626.263502 0 H(38)C(29)N(8)O(6)S(1)'
mod_dict['Biotin-phenacyl[S]'] = 'S NORMAL 626.263502 626.263502 0 H(38)C(29)N(8)O(6)S(1)'
mod_dict['Biotin[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 226.077598 226.077598 0 H(14)C(10)N(2)O(2)S(1)'
mod_dict['Biotin[K]'] = 'K NORMAL 226.077598 226.077598 0 H(14)C(10)N(2)O(2)S(1)'
mod_dict['Biotin_Cayman-10013[C]'] = 'C NORMAL 660.428442 660.428442 0 H(60)C(36)N(4)O(5)S(1)'
mod_dict['Biotin_Cayman-10141[C]'] = 'C NORMAL 626.386577 626.386577 0 H(54)C(35)N(4)O(4)S(1)'
mod_dict['Biotin_Invitrogen-M1602[C]'] = 'C NORMAL 523.210069 523.210069 0 H(33)C(23)N(5)O(7)S(1)'
mod_dict['Biotin_Sigma-B1267[C]'] = 'C NORMAL 449.173290 449.173290 0 H(27)C(20)N(5)O(5)S(1)'
mod_dict['Biotin_Thermo-21325[K]'] = 'K NORMAL 695.310118 695.310118 0 H(45)C(34)N(7)O(7)S(1)'
mod_dict['Biotin_Thermo-21345[Q]'] = 'Q NORMAL 311.166748 311.166748 0 H(25)C(15)N(3)O(2)S(1)'
mod_dict['Biotin_Thermo-21360[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 487.246455 487.246455 0 H(37)C(21)N(5)O(6)S(1)'
mod_dict['Biotin_Thermo-21901+2H2O[C]'] = 'C NORMAL 561.246849 561.246849 0 H(39)C(23)N(5)O(9)S(1)'
mod_dict['Biotin_Thermo-21901+H2O[C]'] = 'C NORMAL 543.236284 543.236284 0 H(37)C(23)N(5)O(8)S(1)'
mod_dict['Biotin_Thermo-21911[C]'] = 'C NORMAL 921.461652 921.461652 0 H(71)C(41)N(5)O(16)S(1)'
mod_dict['Biotin_Thermo-33033-H[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 546.208295 546.208295 0 H(34)C(25)N(6)O(4)S(2)'
mod_dict['Biotin_Thermo-33033[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 548.223945 548.223945 0 H(36)C(25)N(6)O(4)S(2)'
mod_dict['Biotin_Thermo-88310[K]'] = 'K NORMAL 196.121178 196.121178 0 H(16)C(10)N(2)O(2)'
mod_dict['Biotin_Thermo-88317[S]'] = 'S NORMAL 443.291294 443.291294 0 H(42)C(22)N(3)O(4)P(1)'
mod_dict['Biotin_Thermo-88317[Y]'] = 'Y NORMAL 443.291294 443.291294 0 H(42)C(22)N(3)O(4)P(1)'
mod_dict['BisANS[K]'] = 'K NORMAL 594.091928 594.091928 0 H(22)C(32)N(2)O(6)S(2)'
mod_dict['Bodipy[C]'] = 'C NORMAL 414.167478 414.167478 0 H(21)B(1)C(20)N(4)O(3)F(2)'
mod_dict['Bromo[F]'] = 'F NORMAL 77.910511 77.910511 0 H(-1)Br(1)'
mod_dict['Bromo[H]'] = 'H NORMAL 77.910511 77.910511 0 H(-1)Br(1)'
mod_dict['Bromo[W]'] = 'W NORMAL 77.910511 77.910511 0 H(-1)Br(1)'
mod_dict['Bromo[Y]'] = 'Y NORMAL 77.910511 77.910511 0 H(-1)Br(1)'
mod_dict['Bromobimane[C]'] = 'C NORMAL 190.074228 190.074228 0 H(10)C(10)N(2)O(2)'
mod_dict['Butyryl[K]'] = 'K NORMAL 70.041865 70.041865 0 H(6)C(4)O(1)'
mod_dict['C+12[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 12.000000 12.000000 0 C(1)'
mod_dict['C8-QAT[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 227.224915 227.224915 0 H(29)C(14)N(1)O(1)'
mod_dict['C8-QAT[K]'] = 'K NORMAL 227.224915 227.224915 0 H(29)C(14)N(1)O(1)'
mod_dict['CAF[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 135.983029 135.983029 0 H(4)C(3)O(4)S(1)'
mod_dict['CAMthiopropanoyl[K]'] = 'K NORMAL 145.019749 145.019749 0 H(7)C(5)N(1)O(2)S(1)'
mod_dict['CAMthiopropanoyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 145.019749 145.019749 0 H(7)C(5)N(1)O(2)S(1)'
mod_dict['CHDH[D]'] = 'D NORMAL 294.183109 294.183109 0 H(26)C(17)O(4)'
mod_dict['CLIP_TRAQ_2[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 141.098318 141.098318 0 H(12)C(6)13C(1)N(2)O(1)'
mod_dict['CLIP_TRAQ_2[K]'] = 'K NORMAL 141.098318 141.098318 0 H(12)C(6)13C(1)N(2)O(1)'
mod_dict['CLIP_TRAQ_2[Y]'] = 'Y NORMAL 141.098318 141.098318 0 H(12)C(6)13C(1)N(2)O(1)'
mod_dict['CLIP_TRAQ_3[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 271.148736 271.148736 0 H(20)C(11)13C(1)N(3)O(4)'
mod_dict['CLIP_TRAQ_3[K]'] = 'K NORMAL 271.148736 271.148736 0 H(20)C(11)13C(1)N(3)O(4)'
mod_dict['CLIP_TRAQ_3[Y]'] = 'Y NORMAL 271.148736 271.148736 0 H(20)C(11)13C(1)N(3)O(4)'
mod_dict['CLIP_TRAQ_4[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 244.101452 244.101452 0 H(15)C(9)13C(1)N(2)O(5)'
mod_dict['CLIP_TRAQ_4[K]'] = 'K NORMAL 244.101452 244.101452 0 H(15)C(9)13C(1)N(2)O(5)'
mod_dict['CLIP_TRAQ_4[Y]'] = 'Y NORMAL 244.101452 244.101452 0 H(15)C(9)13C(1)N(2)O(5)'
mod_dict['Can-FP-biotin[S]'] = 'S NORMAL 447.195679 447.195679 0 H(34)C(19)N(3)O(5)P(1)S(1)'
mod_dict['Can-FP-biotin[T]'] = 'T NORMAL 447.195679 447.195679 0 H(34)C(19)N(3)O(5)P(1)S(1)'
mod_dict['Can-FP-biotin[Y]'] = 'Y NORMAL 447.195679 447.195679 0 H(34)C(19)N(3)O(5)P(1)S(1)'
mod_dict['Carbamidomethyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[C]'] = 'C NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[D]'] = 'D NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[E]'] = 'E NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[H]'] = 'H NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[K]'] = 'K NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[S]'] = 'S NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[T]'] = 'T NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Carbamidomethyl[Y]'] = 'Y NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
# ---------------------------------------------------------------------------
# Chunk of a (generated-looking) modification table: maps a modification name,
# e.g. 'Carbamyl[K]', to a space-separated spec string.  From the entries
# visible here the fields appear to be (NOTE(review): inferred from the data
# itself — confirm against the code that parses mod_dict):
#   1. allowed residues ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' = any residue)
#   2. position: NORMAL (anywhere), PEP_N/PEP_C (peptide termini),
#      PRO_N/PRO_C (protein termini)
#   3. monoisotopic mass delta (Da)
#   4. second mass column — identical to field 3 in every entry shown here
#   5. neutral-loss count; when non-zero it is followed by that many
#      mass pairs (see the 'GIST-Quat' and 'DEDGFLYMVYASQETFG' entries)
#   6. elemental composition; negative counts mean atoms removed,
#      prefixes like 13C/15N/18O/2H denote heavy isotopes
# ---------------------------------------------------------------------------
mod_dict['CarbamidomethylDTT[C]'] = 'C NORMAL 209.018035 209.018035 0 H(11)C(6)N(1)O(3)S(2)'
mod_dict['Carbamyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[C]'] = 'C NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[K]'] = 'K NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[M]'] = 'M NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[R]'] = 'R NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[S]'] = 'S NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[T]'] = 'T NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[Y]'] = 'Y NORMAL 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbamyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 43.005814 43.005814 0 H(1)C(1)N(1)O(1)'
mod_dict['Carbofuran[S]'] = 'S NORMAL 58.029289 58.029289 0 H(4)C(2)N(1)O(1)'
mod_dict['Carboxy->Thiocarboxy[ProteinC-termG]'] = 'G PRO_C 15.977156 15.977156 0 O(-1)S(1)'
mod_dict['Carboxy[D]'] = 'D NORMAL 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Carboxy[E]'] = 'E NORMAL 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Carboxy[K]'] = 'K NORMAL 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Carboxy[W]'] = 'W NORMAL 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Carboxy[ProteinN-termM]'] = 'M PRO_N 43.989829 43.989829 0 C(1)O(2)'
mod_dict['Carboxyethyl[H]'] = 'H NORMAL 72.021129 72.021129 0 H(4)C(3)O(2)'
mod_dict['Carboxyethyl[K]'] = 'K NORMAL 72.021129 72.021129 0 H(4)C(3)O(2)'
mod_dict['Carboxymethyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['Carboxymethyl[C]'] = 'C NORMAL 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['Carboxymethyl[K]'] = 'K NORMAL 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['Carboxymethyl[W]'] = 'W NORMAL 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['CarboxymethylDMAP[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 162.079313 162.079313 0 H(10)C(9)N(2)O(1)'
mod_dict['CarboxymethylDTT[C]'] = 'C NORMAL 210.002050 210.002050 0 H(10)C(6)O(4)S(2)'
mod_dict['Carboxymethyl_13C(2)[C]'] = 'C NORMAL 60.012189 60.012189 0 H(2)13C(2)O(2)'
# Metal cation adducts: a metal ion replaces one or two acidic protons,
# hence the H(-1)/H(-2) in the composition.
mod_dict['Cation_Ag[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 105.897267 105.897267 0 H(-1)Ag(1)'
mod_dict['Cation_Ag[D]'] = 'D NORMAL 105.897267 105.897267 0 H(-1)Ag(1)'
mod_dict['Cation_Ag[E]'] = 'E NORMAL 105.897267 105.897267 0 H(-1)Ag(1)'
mod_dict['Cation_Ca[II][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 37.946941 37.946941 0 H(-2)Ca(1)'
mod_dict['Cation_Ca[II][D]'] = 'D NORMAL 37.946941 37.946941 0 H(-2)Ca(1)'
mod_dict['Cation_Ca[II][E]'] = 'E NORMAL 37.946941 37.946941 0 H(-2)Ca(1)'
mod_dict['Cation_Cu[I][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 61.921774 61.921774 0 H(-1)Cu(1)'
mod_dict['Cation_Cu[I][D]'] = 'D NORMAL 61.921774 61.921774 0 H(-1)Cu(1)'
mod_dict['Cation_Cu[I][E]'] = 'E NORMAL 61.921774 61.921774 0 H(-1)Cu(1)'
mod_dict['Cation_Fe[II][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 53.919289 53.919289 0 H(-2)Fe(1)'
mod_dict['Cation_Fe[II][D]'] = 'D NORMAL 53.919289 53.919289 0 H(-2)Fe(1)'
mod_dict['Cation_Fe[II][E]'] = 'E NORMAL 53.919289 53.919289 0 H(-2)Fe(1)'
mod_dict['Cation_K[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 37.955882 37.955882 0 H(-1)K(1)'
mod_dict['Cation_K[D]'] = 'D NORMAL 37.955882 37.955882 0 H(-1)K(1)'
mod_dict['Cation_K[E]'] = 'E NORMAL 37.955882 37.955882 0 H(-1)K(1)'
mod_dict['Cation_Li[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 6.008178 6.008178 0 H(-1)Li(1)'
mod_dict['Cation_Li[D]'] = 'D NORMAL 6.008178 6.008178 0 H(-1)Li(1)'
mod_dict['Cation_Li[E]'] = 'E NORMAL 6.008178 6.008178 0 H(-1)Li(1)'
mod_dict['Cation_Mg[II][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 21.969392 21.969392 0 H(-2)Mg(1)'
mod_dict['Cation_Mg[II][D]'] = 'D NORMAL 21.969392 21.969392 0 H(-2)Mg(1)'
mod_dict['Cation_Mg[II][E]'] = 'E NORMAL 21.969392 21.969392 0 H(-2)Mg(1)'
mod_dict['Cation_Na[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 21.981943 21.981943 0 H(-1)Na(1)'
mod_dict['Cation_Na[D]'] = 'D NORMAL 21.981943 21.981943 0 H(-1)Na(1)'
mod_dict['Cation_Na[E]'] = 'E NORMAL 21.981943 21.981943 0 H(-1)Na(1)'
mod_dict['Cation_Ni[II][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 55.919696 55.919696 0 H(-2)Ni(1)'
mod_dict['Cation_Ni[II][D]'] = 'D NORMAL 55.919696 55.919696 0 H(-2)Ni(1)'
mod_dict['Cation_Ni[II][E]'] = 'E NORMAL 55.919696 55.919696 0 H(-2)Ni(1)'
mod_dict['Cation_Zn[II][AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 61.913495 61.913495 0 H(-2)Zn(1)'
mod_dict['Cation_Zn[II][D]'] = 'D NORMAL 61.913495 61.913495 0 H(-2)Zn(1)'
mod_dict['Cation_Zn[II][E]'] = 'E NORMAL 61.913495 61.913495 0 H(-2)Zn(1)'
mod_dict['Chlorination[Y]'] = 'Y NORMAL 34.968853 34.968853 0 Cl(1)'
mod_dict['Chlorpyrifos[S]'] = 'S NORMAL 153.013912 153.013912 0 H(10)C(4)O(2)P(1)S(1)'
mod_dict['Chlorpyrifos[T]'] = 'T NORMAL 153.013912 153.013912 0 H(10)C(4)O(2)P(1)S(1)'
mod_dict['Chlorpyrifos[Y]'] = 'Y NORMAL 153.013912 153.013912 0 H(10)C(4)O(2)P(1)S(1)'
mod_dict['Cholesterol[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 368.344302 368.344302 0 H(44)C(27)'
mod_dict['CoenzymeA[C]'] = 'C NORMAL 765.099560 765.099560 0 H(34)C(21)N(7)O(16)P(3)S(1)'
mod_dict['CresylSaligeninPhosphate[H]'] = 'H NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['CresylSaligeninPhosphate[K]'] = 'K NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['CresylSaligeninPhosphate[R]'] = 'R NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['CresylSaligeninPhosphate[S]'] = 'S NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['CresylSaligeninPhosphate[T]'] = 'T NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['CresylSaligeninPhosphate[Y]'] = 'Y NORMAL 276.055146 276.055146 0 H(13)C(14)O(4)P(1)'
mod_dict['Cresylphosphate[H]'] = 'H NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Cresylphosphate[K]'] = 'K NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Cresylphosphate[R]'] = 'R NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Cresylphosphate[S]'] = 'S NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Cresylphosphate[T]'] = 'T NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Cresylphosphate[Y]'] = 'Y NORMAL 170.013281 170.013281 0 H(7)C(7)O(3)P(1)'
mod_dict['Crotonaldehyde[C]'] = 'C NORMAL 70.041865 70.041865 0 H(6)C(4)O(1)'
mod_dict['Crotonaldehyde[H]'] = 'H NORMAL 70.041865 70.041865 0 H(6)C(4)O(1)'
mod_dict['Crotonyl[K]'] = 'K NORMAL 68.026215 68.026215 0 H(4)C(4)O(1)'
mod_dict['CuSMo[C]'] = 'C NORMAL 922.834855 922.834855 0 H(24)C(19)N(8)O(15)P(2)S(3)Cu(1)Mo(1)'
mod_dict['Cy3-maleimide[C]'] = 'C NORMAL 753.262796 753.262796 0 H(45)C(37)N(4)O(9)S(2)'
mod_dict['Cy3b-maleimide[C]'] = 'C NORMAL 682.246120 682.246120 0 H(38)C(37)N(4)O(7)S(1)'
mod_dict['CyDye-Cy3[C]'] = 'C NORMAL 672.298156 672.298156 0 H(44)C(37)N(4)O(6)S(1)'
mod_dict['CyDye-Cy5[C]'] = 'C NORMAL 684.298156 684.298156 0 H(44)C(38)N(4)O(6)S(1)'
mod_dict['Cyano[C]'] = 'C NORMAL 24.995249 24.995249 0 H(-1)C(1)N(1)'
# Cys -> X amino-acid substitutions: the composition is the element delta of
# the target residue relative to cysteine (S(-1) removes Cys's sulfur).
mod_dict['Cys->Ala[C]'] = 'C NORMAL -31.972071 -31.972071 0 S(-1)'
mod_dict['Cys->Arg[C]'] = 'C NORMAL 53.091927 53.091927 0 H(7)C(3)N(3)S(-1)'
mod_dict['Cys->Asn[C]'] = 'C NORMAL 11.033743 11.033743 0 H(1)C(1)N(1)O(1)S(-1)'
mod_dict['Cys->Asp[C]'] = 'C NORMAL 12.017759 12.017759 0 C(1)O(2)S(-1)'
mod_dict['Cys->Dha[C]'] = 'C NORMAL -33.987721 -33.987721 0 H(-2)S(-1)'
mod_dict['Cys->Gln[C]'] = 'C NORMAL 25.049393 25.049393 0 H(3)C(2)N(1)O(1)S(-1)'
mod_dict['Cys->Glu[C]'] = 'C NORMAL 26.033409 26.033409 0 H(2)C(2)O(2)S(-1)'
mod_dict['Cys->Gly[C]'] = 'C NORMAL -45.987721 -45.987721 0 H(-2)C(-1)S(-1)'
mod_dict['Cys->His[C]'] = 'C NORMAL 34.049727 34.049727 0 H(2)C(3)N(2)S(-1)'
mod_dict['Cys->Lys[C]'] = 'C NORMAL 25.085779 25.085779 0 H(7)C(3)N(1)S(-1)'
mod_dict['Cys->Met[C]'] = 'C NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Cys->Oxoalanine[C]'] = 'C NORMAL -17.992806 -17.992806 0 H(-2)O(1)S(-1)'
mod_dict['Cys->Phe[C]'] = 'C NORMAL 44.059229 44.059229 0 H(4)C(6)S(-1)'
mod_dict['Cys->Pro[C]'] = 'C NORMAL -5.956421 -5.956421 0 H(2)C(2)S(-1)'
mod_dict['Cys->PyruvicAcid[ProteinN-termC]'] = 'C PRO_N -33.003705 -33.003705 0 H(-3)N(-1)O(1)S(-1)'
mod_dict['Cys->Ser[C]'] = 'C NORMAL -15.977156 -15.977156 0 O(1)S(-1)'
mod_dict['Cys->Thr[C]'] = 'C NORMAL -1.961506 -1.961506 0 H(2)C(1)O(1)S(-1)'
mod_dict['Cys->Trp[C]'] = 'C NORMAL 83.070128 83.070128 0 H(5)C(8)N(1)S(-1)'
mod_dict['Cys->Tyr[C]'] = 'C NORMAL 60.054144 60.054144 0 H(4)C(6)O(1)S(-1)'
mod_dict['Cys->Val[C]'] = 'C NORMAL -3.940771 -3.940771 0 H(4)C(2)S(-1)'
mod_dict['Cys->Xle[C]'] = 'C NORMAL 10.074880 10.074880 0 H(6)C(3)S(-1)'
mod_dict['Cys->ethylaminoAla[C]'] = 'C NORMAL 11.070128 11.070128 0 H(5)C(2)N(1)S(-1)'
mod_dict['Cys->methylaminoAla[C]'] = 'C NORMAL -2.945522 -2.945522 0 H(3)C(1)N(1)S(-1)'
mod_dict['Cysteinyl[C]'] = 'C NORMAL 119.004099 119.004099 0 H(5)C(3)N(1)O(2)S(1)'
mod_dict['Cytopiloyne+water[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[C]'] = 'C NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[K]'] = 'K NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[R]'] = 'R NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[S]'] = 'S NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[T]'] = 'T NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne+water[Y]'] = 'Y NORMAL 380.147118 380.147118 0 H(24)C(19)O(8)'
mod_dict['Cytopiloyne[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[C]'] = 'C NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[K]'] = 'K NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[P]'] = 'P NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[R]'] = 'R NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[S]'] = 'S NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['Cytopiloyne[Y]'] = 'Y NORMAL 362.136553 362.136553 0 H(22)C(19)O(7)'
mod_dict['DAET[S]'] = 'S NORMAL 87.050655 87.050655 0 H(9)C(4)N(1)O(-1)S(1)'
mod_dict['DAET[T]'] = 'T NORMAL 87.050655 87.050655 0 H(9)C(4)N(1)O(-1)S(1)'
# Entry below uses neutral-loss count 1: the '1 18.010565 18.010565' pair
# precedes the composition (18.010565 Da is the mass of a water molecule).
mod_dict['DEDGFLYMVYASQETFG[K]'] = 'K NORMAL 1970.824411 1970.824411 1 18.010565 18.010565 H(122)C(89)N(18)O(31)S(1)'
mod_dict['DFDNB[K]'] = 'K NORMAL 203.998263 203.998263 0 H(2)C(6)N(2)O(4)F(2)'
mod_dict['DFDNB[N]'] = 'N NORMAL 203.998263 203.998263 0 H(2)C(6)N(2)O(4)F(2)'
mod_dict['DFDNB[Q]'] = 'Q NORMAL 203.998263 203.998263 0 H(2)C(6)N(2)O(4)F(2)'
mod_dict['DFDNB[R]'] = 'R NORMAL 203.998263 203.998263 0 H(2)C(6)N(2)O(4)F(2)'
mod_dict['DHP[C]'] = 'C NORMAL 118.065674 118.065674 0 H(8)C(8)N(1)'
mod_dict['DMPO[C]'] = 'C NORMAL 111.068414 111.068414 0 H(9)C(6)N(1)O(1)'
mod_dict['DMPO[H]'] = 'H NORMAL 111.068414 111.068414 0 H(9)C(6)N(1)O(1)'
mod_dict['DMPO[Y]'] = 'Y NORMAL 111.068414 111.068414 0 H(9)C(6)N(1)O(1)'
mod_dict['DNCB_hapten[C]'] = 'C NORMAL 166.001457 166.001457 0 H(2)C(6)N(2)O(4)'
mod_dict['DNCB_hapten[H]'] = 'H NORMAL 166.001457 166.001457 0 H(2)C(6)N(2)O(4)'
mod_dict['DNCB_hapten[K]'] = 'K NORMAL 166.001457 166.001457 0 H(2)C(6)N(2)O(4)'
mod_dict['DNCB_hapten[Y]'] = 'Y NORMAL 166.001457 166.001457 0 H(2)C(6)N(2)O(4)'
mod_dict['DNPS[C]'] = 'C NORMAL 198.981352 198.981352 0 H(3)C(6)N(2)O(4)S(1)'
mod_dict['DNPS[W]'] = 'W NORMAL 198.981352 198.981352 0 H(3)C(6)N(2)O(4)S(1)'
mod_dict['DTBP[K]'] = 'K NORMAL 87.014270 87.014270 0 H(5)C(3)N(1)S(1)'
mod_dict['DTBP[N]'] = 'N NORMAL 87.014270 87.014270 0 H(5)C(3)N(1)S(1)'
mod_dict['DTBP[Q]'] = 'Q NORMAL 87.014270 87.014270 0 H(5)C(3)N(1)S(1)'
mod_dict['DTBP[R]'] = 'R NORMAL 87.014270 87.014270 0 H(5)C(3)N(1)S(1)'
mod_dict['DTBP[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 87.014270 87.014270 0 H(5)C(3)N(1)S(1)'
mod_dict['DTT_C_2H(6)[C]'] = 'C NORMAL 126.062161 126.062161 0 H(2)2H(6)C(4)O(2)S(1)'
mod_dict['DTT_ST[S]'] = 'S NORMAL 136.001656 136.001656 0 H(8)C(4)O(1)S(2)'
mod_dict['DTT_ST[T]'] = 'T NORMAL 136.001656 136.001656 0 H(8)C(4)O(1)S(2)'
mod_dict['DTT_ST_2H(6)[S]'] = 'S NORMAL 142.039317 142.039317 0 H(2)2H(6)C(4)O(1)S(2)'
mod_dict['DTT_ST_2H(6)[T]'] = 'T NORMAL 142.039317 142.039317 0 H(2)2H(6)C(4)O(1)S(2)'
mod_dict['Dansyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 233.051049 233.051049 0 H(11)C(12)N(1)O(2)S(1)'
mod_dict['Dansyl[K]'] = 'K NORMAL 233.051049 233.051049 0 H(11)C(12)N(1)O(2)S(1)'
mod_dict['Dap-DSP[A]'] = 'A NORMAL 328.055148 328.055148 0 H(16)C(13)N(2)O(4)S(2)'
mod_dict['Dap-DSP[E]'] = 'E NORMAL 328.055148 328.055148 0 H(16)C(13)N(2)O(4)S(2)'
mod_dict['Dap-DSP[K]'] = 'K NORMAL 328.055148 328.055148 0 H(16)C(13)N(2)O(4)S(2)'
mod_dict['DeStreak[C]'] = 'C NORMAL 75.998285 75.998285 0 H(4)C(2)O(1)S(1)'
mod_dict['Deamidated[N]'] = 'N NORMAL 0.984016 0.984016 0 H(-1)N(-1)O(1)'
mod_dict['Deamidated[Q]'] = 'Q NORMAL 0.984016 0.984016 0 H(-1)N(-1)O(1)'
mod_dict['Deamidated[R]'] = 'R NORMAL 0.984016 0.984016 0 H(-1)N(-1)O(1)'
mod_dict['Deamidated[ProteinN-termF]'] = 'F PRO_N 0.984016 0.984016 0 H(-1)N(-1)O(1)'
mod_dict['Deamidated_18O(1)[Q]'] = 'Q NORMAL 2.988261 2.988261 0 H(-1)N(-1)18O(1)'
mod_dict['Decanoyl[S]'] = 'S NORMAL 154.135765 154.135765 0 H(18)C(10)O(1)'
mod_dict['Decanoyl[T]'] = 'T NORMAL 154.135765 154.135765 0 H(18)C(10)O(1)'
mod_dict['Dehydrated[AnyN-termC]'] = 'C PEP_N -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[D]'] = 'D NORMAL -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[S]'] = 'S NORMAL -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[T]'] = 'T NORMAL -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[Y]'] = 'Y NORMAL -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[ProteinC-termN]'] = 'N PRO_C -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydrated[ProteinC-termQ]'] = 'Q PRO_C -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Dehydro[C]'] = 'C NORMAL -1.007825 -1.007825 0 H(-1)'
mod_dict['Delta_H(1)O(-1)18O(1)[N]'] = 'N NORMAL 2.988261 2.988261 0 H(-1)N(-1)18O(1)'
mod_dict['Delta_H(2)C(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 26.015650 26.015650 0 H(2)C(2)'
mod_dict['Delta_H(2)C(2)[H]'] = 'H NORMAL 26.015650 26.015650 0 H(2)C(2)'
mod_dict['Delta_H(2)C(2)[K]'] = 'K NORMAL 26.015650 26.015650 0 H(2)C(2)'
mod_dict['Delta_H(2)C(2)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 26.015650 26.015650 0 H(2)C(2)'
mod_dict['Delta_H(2)C(3)[K]'] = 'K NORMAL 38.015650 38.015650 0 H(2)C(3)'
mod_dict['Delta_H(2)C(3)O(1)[K]'] = 'K NORMAL 54.010565 54.010565 0 H(2)C(3)O(1)'
mod_dict['Delta_H(2)C(5)[K]'] = 'K NORMAL 62.015650 62.015650 0 H(2)C(5)'
mod_dict['Delta_H(4)C(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Delta_H(4)C(2)[H]'] = 'H NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Delta_H(4)C(2)[K]'] = 'K NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Delta_H(4)C(2)O(-1)S(1)[S]'] = 'S NORMAL 44.008456 44.008456 0 H(4)C(2)O(-1)S(1)'
mod_dict['Delta_H(4)C(3)[H]'] = 'H NORMAL 40.031300 40.031300 0 H(4)C(3)'
mod_dict['Delta_H(4)C(3)[K]'] = 'K NORMAL 40.031300 40.031300 0 H(4)C(3)'
mod_dict['Delta_H(4)C(3)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 40.031300 40.031300 0 H(4)C(3)'
mod_dict['Delta_H(4)C(3)O(1)[C]'] = 'C NORMAL 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Delta_H(4)C(3)O(1)[H]'] = 'H NORMAL 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Delta_H(4)C(6)[K]'] = 'K NORMAL 76.031300 76.031300 0 H(4)C(6)'
mod_dict['Delta_H(5)C(2)[P]'] = 'P NORMAL 29.039125 29.039125 0 H(5)C(2)'
mod_dict['Delta_H(6)C(3)O(1)[C]'] = 'C NORMAL 58.041865 58.041865 0 H(6)C(3)O(1)'
mod_dict['Delta_H(6)C(3)O(1)[H]'] = 'H NORMAL 58.041865 58.041865 0 H(6)C(3)O(1)'
mod_dict['Delta_H(6)C(3)O(1)[K]'] = 'K NORMAL 58.041865 58.041865 0 H(6)C(3)O(1)'
mod_dict['Delta_H(6)C(3)O(1)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 58.041865 58.041865 0 H(6)C(3)O(1)'
mod_dict['Delta_H(6)C(6)O(1)[K]'] = 'K NORMAL 94.041865 94.041865 0 H(6)C(6)O(1)'
mod_dict['Delta_H(8)C(6)O(1)[L]'] = 'L NORMAL 96.057515 96.057515 0 H(8)C(6)O(1)'
mod_dict['Delta_H(8)C(6)O(1)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 96.057515 96.057515 0 H(8)C(6)O(1)'
mod_dict['Delta_H(8)C(6)O(2)[K]'] = 'K NORMAL 112.052430 112.052430 0 H(8)C(6)O(2)'
mod_dict['Delta_Hg(1)[C]'] = 'C NORMAL 201.970617 201.970617 0 Hg(1)'
mod_dict['Delta_S(-1)Se(1)[C]'] = 'C NORMAL 47.944449 47.944449 0 S(-1)Se(1)'
mod_dict['Delta_S(-1)Se(1)[M]'] = 'M NORMAL 47.944449 47.944449 0 S(-1)Se(1)'
mod_dict['Delta_Se(1)[C]'] = 'C NORMAL 79.916520 79.916520 0 Se(1)'
mod_dict['Deoxy[D]'] = 'D NORMAL -15.994915 -15.994915 0 O(-1)'
mod_dict['Deoxy[S]'] = 'S NORMAL -15.994915 -15.994915 0 O(-1)'
mod_dict['Deoxy[T]'] = 'T NORMAL -15.994915 -15.994915 0 O(-1)'
mod_dict['Deoxyhypusine[K]'] = 'K NORMAL 71.073499 71.073499 0 H(9)C(4)N(1)'
mod_dict['Dethiomethyl[M]'] = 'M NORMAL -48.003371 -48.003371 0 H(-4)C(-1)S(-1)'
# DiART 6-plex isobaric labels: all channels share the same nominal mass,
# differing only in isotope placement (13C/15N/2H counts).
mod_dict['DiART6plex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 217.162932 217.162932 0 H(20)C(7)13C(4)N(1)15N(1)O(2)'
mod_dict['DiART6plex[K]'] = 'K NORMAL 217.162932 217.162932 0 H(20)C(7)13C(4)N(1)15N(1)O(2)'
mod_dict['DiART6plex[Y]'] = 'Y NORMAL 217.162932 217.162932 0 H(20)C(7)13C(4)N(1)15N(1)O(2)'
mod_dict['DiART6plex[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 217.162932 217.162932 0 H(20)C(7)13C(4)N(1)15N(1)O(2)'
mod_dict['DiART6plex115[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 217.156612 217.156612 0 H(20)C(8)13C(3)15N(2)O(2)'
mod_dict['DiART6plex115[K]'] = 'K NORMAL 217.156612 217.156612 0 H(20)C(8)13C(3)15N(2)O(2)'
mod_dict['DiART6plex115[Y]'] = 'Y NORMAL 217.156612 217.156612 0 H(20)C(8)13C(3)15N(2)O(2)'
mod_dict['DiART6plex115[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 217.156612 217.156612 0 H(20)C(8)13C(3)15N(2)O(2)'
mod_dict['DiART6plex116/119[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 217.168776 217.168776 0 H(18)2H(2)C(9)13C(2)N(1)15N(1)O(2)'
mod_dict['DiART6plex116/119[K]'] = 'K NORMAL 217.168776 217.168776 0 H(18)2H(2)C(9)13C(2)N(1)15N(1)O(2)'
mod_dict['DiART6plex116/119[Y]'] = 'Y NORMAL 217.168776 217.168776 0 H(18)2H(2)C(9)13C(2)N(1)15N(1)O(2)'
mod_dict['DiART6plex116/119[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 217.168776 217.168776 0 H(18)2H(2)C(9)13C(2)N(1)15N(1)O(2)'
mod_dict['DiART6plex117[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 217.162456 217.162456 0 H(18)2H(2)C(10)13C(1)15N(2)O(2)'
mod_dict['DiART6plex117[K]'] = 'K NORMAL 217.162456 217.162456 0 H(18)2H(2)C(10)13C(1)15N(2)O(2)'
mod_dict['DiART6plex117[Y]'] = 'Y NORMAL 217.162456 217.162456 0 H(18)2H(2)C(10)13C(1)15N(2)O(2)'
mod_dict['DiART6plex117[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 217.162456 217.162456 0 H(18)2H(2)C(10)13C(1)15N(2)O(2)'
mod_dict['DiART6plex118[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 217.175096 217.175096 0 H(18)2H(2)C(8)13C(3)N(2)O(2)'
mod_dict['DiART6plex118[K]'] = 'K NORMAL 217.175096 217.175096 0 H(18)2H(2)C(8)13C(3)N(2)O(2)'
mod_dict['DiART6plex118[Y]'] = 'Y NORMAL 217.175096 217.175096 0 H(18)2H(2)C(8)13C(3)N(2)O(2)'
mod_dict['DiART6plex118[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 217.175096 217.175096 0 H(18)2H(2)C(8)13C(3)N(2)O(2)'
mod_dict['DiDehydro[C]'] = 'C NORMAL -2.015650 -2.015650 0 H(-2)'
# DiLeu 4-plex isobaric labels (nominal mass 145), per-channel isotopologues.
mod_dict['DiLeu4plex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 145.132163 145.132163 0 H(13)2H(2)C(8)N(1)18O(1)'
mod_dict['DiLeu4plex[K]'] = 'K NORMAL 145.132163 145.132163 0 H(13)2H(2)C(8)N(1)18O(1)'
mod_dict['DiLeu4plex[Y]'] = 'Y NORMAL 145.132163 145.132163 0 H(13)2H(2)C(8)N(1)18O(1)'
mod_dict['DiLeu4plex115[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 145.120000 145.120000 0 H(15)C(7)13C(1)15N(1)18O(1)'
mod_dict['DiLeu4plex115[K]'] = 'K NORMAL 145.120000 145.120000 0 H(15)C(7)13C(1)15N(1)18O(1)'
mod_dict['DiLeu4plex115[Y]'] = 'Y NORMAL 145.120000 145.120000 0 H(15)C(7)13C(1)15N(1)18O(1)'
mod_dict['DiLeu4plex117[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 145.128307 145.128307 0 H(13)2H(2)C(7)13C(1)15N(1)O(1)'
mod_dict['DiLeu4plex117[K]'] = 'K NORMAL 145.128307 145.128307 0 H(13)2H(2)C(7)13C(1)15N(1)O(1)'
mod_dict['DiLeu4plex117[Y]'] = 'Y NORMAL 145.128307 145.128307 0 H(13)2H(2)C(7)13C(1)15N(1)O(1)'
mod_dict['DiLeu4plex118[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 145.140471 145.140471 0 H(11)2H(4)C(8)N(1)O(1)'
mod_dict['DiLeu4plex118[K]'] = 'K NORMAL 145.140471 145.140471 0 H(11)2H(4)C(8)N(1)O(1)'
mod_dict['DiLeu4plex118[Y]'] = 'Y NORMAL 145.140471 145.140471 0 H(11)2H(4)C(8)N(1)O(1)'
mod_dict['Diacylglycerol[C]'] = 'C NORMAL 576.511761 576.511761 0 H(68)C(37)O(4)'
mod_dict['Dibromo[Y]'] = 'Y NORMAL 155.821022 155.821022 0 H(-2)Br(2)'
mod_dict['Dicarbamidomethyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Dicarbamidomethyl[C]'] = 'C NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Dicarbamidomethyl[H]'] = 'H NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Dicarbamidomethyl[K]'] = 'K NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Dicarbamidomethyl[R]'] = 'R NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Didehydro[AnyC-termK]'] = 'K PEP_C -2.015650 -2.015650 0 H(-2)'
mod_dict['Didehydro[S]'] = 'S NORMAL -2.015650 -2.015650 0 H(-2)'
mod_dict['Didehydro[T]'] = 'T NORMAL -2.015650 -2.015650 0 H(-2)'
mod_dict['Didehydro[Y]'] = 'Y NORMAL -2.015650 -2.015650 0 H(-2)'
mod_dict['Didehydroretinylidene[K]'] = 'K NORMAL 264.187801 264.187801 0 H(24)C(20)'
mod_dict['Diethyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 56.062600 56.062600 0 H(8)C(4)'
mod_dict['Diethyl[K]'] = 'K NORMAL 56.062600 56.062600 0 H(8)C(4)'
mod_dict['Diethylphosphate[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[C]'] = 'C NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[H]'] = 'H NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[K]'] = 'K NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[S]'] = 'S NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[T]'] = 'T NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Diethylphosphate[Y]'] = 'Y NORMAL 136.028931 136.028931 0 H(9)C(4)O(3)P(1)'
mod_dict['Difuran[Y]'] = 'Y NORMAL 132.021129 132.021129 0 H(4)C(8)O(2)'
mod_dict['Dihydroxyimidazolidine[R]'] = 'R NORMAL 72.021129 72.021129 0 H(4)C(3)O(2)'
mod_dict['Diiodo[H]'] = 'H NORMAL 251.793296 251.793296 0 H(-2)I(2)'
mod_dict['Diiodo[Y]'] = 'Y NORMAL 251.793296 251.793296 0 H(-2)I(2)'
mod_dict['Diironsubcluster[C]'] = 'C NORMAL 342.786916 342.786916 0 H(-1)C(5)N(2)O(5)S(2)Fe(2)'
mod_dict['Diisopropylphosphate[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 164.060231 164.060231 0 H(13)C(6)O(3)P(1)'
mod_dict['Diisopropylphosphate[K]'] = 'K NORMAL 164.060231 164.060231 0 H(13)C(6)O(3)P(1)'
mod_dict['Diisopropylphosphate[S]'] = 'S NORMAL 164.060231 164.060231 0 H(13)C(6)O(3)P(1)'
mod_dict['Diisopropylphosphate[T]'] = 'T NORMAL 164.060231 164.060231 0 H(13)C(6)O(3)P(1)'
mod_dict['Diisopropylphosphate[Y]'] = 'Y NORMAL 164.060231 164.060231 0 H(13)C(6)O(3)P(1)'
mod_dict['Dimethyl[N]'] = 'N NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Dimethyl[R]'] = 'R NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Dimethyl[ProteinN-termP]'] = 'P PRO_N 28.031300 28.031300 0 H(4)C(2)'
mod_dict['DimethylArsino[C]'] = 'C NORMAL 103.960719 103.960719 0 H(5)C(2)As(1)'
mod_dict['Dimethyl_2H(4)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 32.056407 32.056407 0 2H(4)C(2)'
mod_dict['Dimethyl_2H(4)[K]'] = 'K NORMAL 32.056407 32.056407 0 2H(4)C(2)'
mod_dict['Dimethyl_2H(4)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 32.056407 32.056407 0 2H(4)C(2)'
mod_dict['Dimethyl_2H(4)13C(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 34.063117 34.063117 0 2H(4)13C(2)'
mod_dict['Dimethyl_2H(4)13C(2)[K]'] = 'K NORMAL 34.063117 34.063117 0 2H(4)13C(2)'
mod_dict['Dimethyl_2H(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 34.068961 34.068961 0 H(-2)2H(6)C(2)'
mod_dict['Dimethyl_2H(6)[K]'] = 'K NORMAL 34.068961 34.068961 0 H(-2)2H(6)C(2)'
mod_dict['Dimethyl_2H(6)[R]'] = 'R NORMAL 34.068961 34.068961 0 H(-2)2H(6)C(2)'
mod_dict['Dimethyl_2H(6)13C(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 36.075670 36.075670 0 H(-2)2H(6)13C(2)'
mod_dict['Dimethyl_2H(6)13C(2)[K]'] = 'K NORMAL 36.075670 36.075670 0 H(-2)2H(6)13C(2)'
mod_dict['Dimethyl_2H(6)13C(2)[R]'] = 'R NORMAL 36.075670 36.075670 0 H(-2)2H(6)13C(2)'
mod_dict['DimethylamineGMBS[C]'] = 'C NORMAL 267.158292 267.158292 0 H(21)C(13)N(3)O(3)'
mod_dict['DimethylpyrroleAdduct[K]'] = 'K NORMAL 78.046950 78.046950 0 H(6)C(6)'
mod_dict['Dioxidation[C]'] = 'C NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[F]'] = 'F NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[K]'] = 'K NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[M]'] = 'M NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[P]'] = 'P NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[R]'] = 'R NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[W]'] = 'W NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Dioxidation[Y]'] = 'Y NORMAL 31.989829 31.989829 0 O(2)'
mod_dict['Diphthamide[H]'] = 'H NORMAL 143.118438 143.118438 0 H(15)C(7)N(2)O(1)'
mod_dict['Dipyridyl[C]'] = 'C NORMAL 225.090212 225.090212 0 H(11)C(13)N(3)O(1)'
mod_dict['Dipyrrolylmethanemethyl[C]'] = 'C NORMAL 418.137616 418.137616 0 H(22)C(20)N(2)O(8)'
mod_dict['DyLight-maleimide[C]'] = 'C NORMAL 940.199900 940.199900 0 H(48)C(39)N(4)O(15)S(4)'
mod_dict['EDT-iodoacetyl-PEO-biotin[S]'] = 'S NORMAL 490.174218 490.174218 0 H(34)C(20)N(4)O(4)S(3)'
mod_dict['EDT-iodoacetyl-PEO-biotin[T]'] = 'T NORMAL 490.174218 490.174218 0 H(34)C(20)N(4)O(4)S(3)'
mod_dict['EDT-maleimide-PEO-biotin[S]'] = 'S NORMAL 601.206246 601.206246 0 H(39)C(25)N(5)O(6)S(3)'
mod_dict['EDT-maleimide-PEO-biotin[T]'] = 'T NORMAL 601.206246 601.206246 0 H(39)C(25)N(5)O(6)S(3)'
mod_dict['EGCG1[C]'] = 'C NORMAL 456.069261 456.069261 0 H(16)C(22)O(11)'
mod_dict['EGCG2[C]'] = 'C NORMAL 287.055563 287.055563 0 H(11)C(15)O(6)'
mod_dict['EHD-diphenylpentanone[C]'] = 'C NORMAL 266.130680 266.130680 0 H(18)C(18)O(2)'
mod_dict['EHD-diphenylpentanone[M]'] = 'M NORMAL 266.130680 266.130680 0 H(18)C(18)O(2)'
mod_dict['EQAT[C]'] = 'C NORMAL 184.157563 184.157563 0 H(20)C(10)N(2)O(1)'
mod_dict['EQAT_2H(5)[C]'] = 'C NORMAL 189.188947 189.188947 0 H(15)2H(5)C(10)N(2)O(1)'
mod_dict['EQIGG[K]'] = 'K NORMAL 484.228162 484.228162 0 H(32)C(20)N(6)O(8)'
mod_dict['ESP[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 338.177647 338.177647 0 H(26)C(16)N(4)O(2)S(1)'
mod_dict['ESP[K]'] = 'K NORMAL 338.177647 338.177647 0 H(26)C(16)N(4)O(2)S(1)'
mod_dict['ESP_2H(10)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 348.240414 348.240414 0 H(16)2H(10)C(16)N(4)O(2)S(1)'
mod_dict['ESP_2H(10)[K]'] = 'K NORMAL 348.240414 348.240414 0 H(16)2H(10)C(16)N(4)O(2)S(1)'
mod_dict['Ethanedithiol[S]'] = 'S NORMAL 75.980527 75.980527 0 H(4)C(2)O(-1)S(2)'
mod_dict['Ethanedithiol[T]'] = 'T NORMAL 75.980527 75.980527 0 H(4)C(2)O(-1)S(2)'
mod_dict['Ethanolamine[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 43.042199 43.042199 0 H(5)C(2)N(1)'
mod_dict['Ethanolamine[C]'] = 'C NORMAL 43.042199 43.042199 0 H(5)C(2)N(1)'
mod_dict['Ethanolamine[D]'] = 'D NORMAL 43.042199 43.042199 0 H(5)C(2)N(1)'
mod_dict['Ethanolamine[E]'] = 'E NORMAL 43.042199 43.042199 0 H(5)C(2)N(1)'
mod_dict['Ethanolyl[C]'] = 'C NORMAL 44.026215 44.026215 0 H(4)C(2)O(1)'
mod_dict['Ethanolyl[K]'] = 'K NORMAL 44.026215 44.026215 0 H(4)C(2)O(1)'
mod_dict['Ethanolyl[R]'] = 'R NORMAL 44.026215 44.026215 0 H(4)C(2)O(1)'
mod_dict['Ethoxyformyl[H]'] = 'H NORMAL 73.028954 73.028954 0 H(5)C(3)O(2)'
mod_dict['Ethyl+Deamidated[N]'] = 'N NORMAL 29.015316 29.015316 0 H(3)C(2)N(-1)O(1)'
mod_dict['Ethyl+Deamidated[Q]'] = 'Q NORMAL 29.015316 29.015316 0 H(3)C(2)N(-1)O(1)'
mod_dict['Ethyl[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Ethyl[D]'] = 'D NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Ethyl[E]'] = 'E NORMAL 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Ethyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 28.031300 28.031300 0 H(4)C(2)'
mod_dict['Ethylphosphate[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 107.997631 107.997631 0 H(5)C(2)O(3)P(1)'
mod_dict['Ethylphosphate[K]'] = 'K NORMAL 107.997631 107.997631 0 H(5)C(2)O(3)P(1)'
mod_dict['ExacTagAmine[K]'] = 'K NORMAL 1046.347854 1046.347854 0 H(52)C(25)13C(12)N(8)15N(6)O(19)S(1)'
mod_dict['ExacTagThiol[C]'] = 'C NORMAL 972.365219 972.365219 0 H(50)C(23)13C(12)N(8)15N(6)O(18)'
mod_dict['FAD[C]'] = 'C NORMAL 783.141486 783.141486 0 H(31)C(27)N(9)O(15)P(2)'
mod_dict['FAD[H]'] = 'H NORMAL 783.141486 783.141486 0 H(31)C(27)N(9)O(15)P(2)'
mod_dict['FAD[Y]'] = 'Y NORMAL 783.141486 783.141486 0 H(31)C(27)N(9)O(15)P(2)'
mod_dict['FMN[S]'] = 'S NORMAL 438.094051 438.094051 0 H(19)C(17)N(4)O(8)P(1)'
mod_dict['FMN[T]'] = 'T NORMAL 438.094051 438.094051 0 H(19)C(17)N(4)O(8)P(1)'
mod_dict['FMNC[C]'] = 'C NORMAL 456.104615 456.104615 0 H(21)C(17)N(4)O(9)P(1)'
mod_dict['FMNH[C]'] = 'C NORMAL 454.088965 454.088965 0 H(19)C(17)N(4)O(9)P(1)'
mod_dict['FMNH[H]'] = 'H NORMAL 454.088965 454.088965 0 H(19)C(17)N(4)O(9)P(1)'
mod_dict['FNEM[C]'] = 'C NORMAL 427.069202 427.069202 0 H(13)C(24)N(1)O(7)'
mod_dict['FP-Biotin[K]'] = 'K NORMAL 572.316129 572.316129 0 H(49)C(27)N(4)O(5)P(1)S(1)'
mod_dict['FP-Biotin[S]'] = 'S NORMAL 572.316129 572.316129 0 H(49)C(27)N(4)O(5)P(1)S(1)'
mod_dict['FP-Biotin[T]'] = 'T NORMAL 572.316129 572.316129 0 H(49)C(27)N(4)O(5)P(1)S(1)'
mod_dict['FP-Biotin[Y]'] = 'Y NORMAL 572.316129 572.316129 0 H(49)C(27)N(4)O(5)P(1)S(1)'
mod_dict['FTC[C]'] = 'C NORMAL 421.073241 421.073241 0 H(15)C(21)N(3)O(5)S(1)'
mod_dict['FTC[K]'] = 'K NORMAL 421.073241 421.073241 0 H(15)C(21)N(3)O(5)S(1)'
mod_dict['FTC[P]'] = 'P NORMAL 421.073241 421.073241 0 H(15)C(21)N(3)O(5)S(1)'
mod_dict['FTC[R]'] = 'R NORMAL 421.073241 421.073241 0 H(15)C(21)N(3)O(5)S(1)'
mod_dict['FTC[S]'] = 'S NORMAL 421.073241 421.073241 0 H(15)C(21)N(3)O(5)S(1)'
mod_dict['Farnesyl[C]'] = 'C NORMAL 204.187801 204.187801 0 H(24)C(15)'
mod_dict['Fluorescein[C]'] = 'C NORMAL 388.082112 388.082112 0 H(14)C(22)N(1)O(6)'
mod_dict['Fluoro[A]'] = 'A NORMAL 17.990578 17.990578 0 H(-1)F(1)'
mod_dict['Fluoro[F]'] = 'F NORMAL 17.990578 17.990578 0 H(-1)F(1)'
mod_dict['Fluoro[W]'] = 'W NORMAL 17.990578 17.990578 0 H(-1)F(1)'
mod_dict['Fluoro[Y]'] = 'Y NORMAL 17.990578 17.990578 0 H(-1)F(1)'
mod_dict['Formyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 27.994915 27.994915 0 C(1)O(1)'
mod_dict['Formyl[K]'] = 'K NORMAL 27.994915 27.994915 0 C(1)O(1)'
mod_dict['Formyl[S]'] = 'S NORMAL 27.994915 27.994915 0 C(1)O(1)'
mod_dict['Formyl[T]'] = 'T NORMAL 27.994915 27.994915 0 C(1)O(1)'
mod_dict['Formyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 27.994915 27.994915 0 C(1)O(1)'
mod_dict['FormylMet[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 159.035399 159.035399 0 H(9)C(6)N(1)O(2)S(1)'
mod_dict['Furan[Y]'] = 'Y NORMAL 66.010565 66.010565 0 H(2)C(4)O(1)'
mod_dict['G-H1[R]'] = 'R NORMAL 39.994915 39.994915 0 C(2)O(1)'
mod_dict['GGQ[K]'] = 'K NORMAL 242.101505 242.101505 0 H(14)C(9)N(4)O(4)'
# GIST-Quat labels: neutral-loss count 1, followed by the loss-mass pair
# (59.073499 and its deuterated variants) before the composition.
mod_dict['GIST-Quat[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 127.099714 127.099714 1 59.073499 59.073499 H(13)C(7)N(1)O(1)'
mod_dict['GIST-Quat[K]'] = 'K NORMAL 127.099714 127.099714 1 59.073499 59.073499 H(13)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(3)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 130.118544 130.118544 1 62.092330 62.092330 H(10)2H(3)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(3)[K]'] = 'K NORMAL 130.118544 130.118544 1 62.092330 62.092330 H(10)2H(3)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 133.137375 133.137375 1 65.111160 65.111160 H(7)2H(6)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(6)[K]'] = 'K NORMAL 133.137375 133.137375 1 65.111160 65.111160 H(7)2H(6)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(9)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 136.156205 136.156205 1 68.129990 68.129990 H(4)2H(9)C(7)N(1)O(1)'
mod_dict['GIST-Quat_2H(9)[K]'] = 'K NORMAL 136.156205 136.156205 1 68.129990 68.129990 H(4)2H(9)C(7)N(1)O(1)'
mod_dict['GPIanchor[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 123.008530 123.008530 0 H(6)C(2)N(1)O(3)P(1)'
mod_dict['GeranylGeranyl[C]'] = 'C NORMAL 272.250401 272.250401 0 H(32)C(20)'
# Gln -> X amino-acid substitutions (element delta relative to glutamine).
mod_dict['Gln->Ala[Q]'] = 'Q NORMAL -57.021464 -57.021464 0 H(-3)C(-2)N(-1)O(-1)'
mod_dict['Gln->Arg[Q]'] = 'Q NORMAL 28.042534 28.042534 0 H(4)C(1)N(2)O(-1)'
mod_dict['Gln->Asn[Q]'] = 'Q NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Gln->Asp[Q]'] = 'Q NORMAL -13.031634 -13.031634 0 H(-3)C(-1)N(-1)O(1)'
mod_dict['Gln->Cys[Q]'] = 'Q NORMAL -25.049393 -25.049393 0 H(-3)C(-2)N(-1)O(-1)S(1)'
mod_dict['Gln->Gly[Q]'] = 'Q NORMAL -71.037114 -71.037114 0 H(-5)C(-3)N(-1)O(-1)'
mod_dict['Gln->His[Q]'] = 'Q NORMAL 9.000334 9.000334 0 H(-1)C(1)N(1)O(-1)'
mod_dict['Gln->Lys[Q]'] = 'Q NORMAL 0.036386 0.036386 0 H(4)C(1)O(-1)'
mod_dict['Gln->Met[Q]'] = 'Q NORMAL 2.981907 2.981907 0 H(1)N(-1)O(-1)S(1)'
mod_dict['Gln->Phe[Q]'] = 'Q NORMAL 19.009836 19.009836 0 H(1)C(4)N(-1)O(-1)'
mod_dict['Gln->Pro[Q]'] = 'Q NORMAL -31.005814 -31.005814 0 H(-1)N(-1)O(-1)'
mod_dict['Gln->Ser[Q]'] = 'Q NORMAL -41.026549 -41.026549 0 H(-3)C(-2)N(-1)'
mod_dict['Gln->Thr[Q]'] = 'Q NORMAL -27.010899 -27.010899 0 H(-1)C(-1)N(-1)'
mod_dict['Gln->Trp[Q]'] = 'Q NORMAL 58.020735 58.020735 0 H(2)C(6)O(-1)'
mod_dict['Gln->Tyr[Q]'] = 'Q NORMAL 35.004751 35.004751 0 H(1)C(4)N(-1)'
mod_dict['Gln->Val[Q]'] = 'Q NORMAL -28.990164 -28.990164 0 H(1)N(-1)O(-1)'
mod_dict['Gln->Xle[Q]'] = 'Q NORMAL -14.974514 -14.974514 0 H(3)C(1)N(-1)O(-1)'
mod_dict['Gln->pyro-Glu[AnyN-termQ]'] = 'Q PEP_N -17.026549 -17.026549 0 H(-3)N(-1)'
# Glu -> X amino-acid substitutions (element delta relative to glutamate).
mod_dict['Glu->Ala[E]'] = 'E NORMAL -58.005479 -58.005479 0 H(-2)C(-2)O(-2)'
mod_dict['Glu->Arg[E]'] = 'E NORMAL 27.058518 27.058518 0 H(5)C(1)N(3)O(-2)'
mod_dict['Glu->Asn[E]'] = 'E NORMAL -14.999666 -14.999666 0 H(-1)C(-1)N(1)O(-1)'
mod_dict['Glu->Asp[E]'] = 'E NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Glu->Cys[E]'] = 'E NORMAL -26.033409 -26.033409 0 H(-2)C(-2)O(-2)S(1)'
mod_dict['Glu->Gln[E]'] = 'E NORMAL -0.984016 -0.984016 0 H(1)N(1)O(-1)'
mod_dict['Glu->Gly[E]'] = 'E NORMAL -72.021129 -72.021129 0 H(-4)C(-3)O(-2)'
mod_dict['Glu->His[E]'] = 'E NORMAL 8.016319 8.016319 0 C(1)N(2)O(-2)'
mod_dict['Glu->Lys[E]'] = 'E NORMAL -0.947630 -0.947630 0 H(5)C(1)N(1)O(-2)'
mod_dict['Glu->Met[E]'] = 'E NORMAL 1.997892 1.997892 0 H(2)O(-2)S(1)'
mod_dict['Glu->Phe[E]'] = 'E NORMAL 18.025821 18.025821 0 H(2)C(4)O(-2)'
mod_dict['Glu->Pro[E]'] = 'E NORMAL -31.989829 -31.989829 0 O(-2)'
mod_dict['Glu->Ser[E]'] = 'E NORMAL -42.010565 -42.010565 0 H(-2)C(-2)O(-1)'
mod_dict['Glu->Thr[E]'] = 'E NORMAL -27.994915 -27.994915 0 C(-1)O(-1)'
mod_dict['Glu->Trp[E]'] = 'E NORMAL 57.036720 57.036720 0 H(3)C(6)N(1)O(-2)'
mod_dict['Glu->Tyr[E]'] = 'E NORMAL 34.020735 34.020735 0 H(2)C(4)O(-1)'
mod_dict['Glu->Val[E]'] = 'E NORMAL -29.974179 -29.974179 0 H(2)O(-2)'
mod_dict['Glu->Xle[E]'] = 'E NORMAL -15.958529 -15.958529 0 H(4)C(1)O(-2)'
mod_dict['Glu->pyro-Glu[AnyN-termE]'] = 'E PEP_N -18.010565 -18.010565 0 H(-2)O(-1)'
mod_dict['Glu[E]'] = 'E NORMAL 129.042593 129.042593 0 H(7)C(5)N(1)O(3)'
mod_dict['Glu[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 129.042593 129.042593 0 H(7)C(5)N(1)O(3)'
mod_dict['GluGlu[E]'] = 'E NORMAL 258.085186 258.085186 0 H(14)C(10)N(2)O(6)'
mod_dict['GluGlu[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 258.085186 258.085186 0 H(14)C(10)N(2)O(6)'
mod_dict['GluGluGlu[E]'] = 'E NORMAL 387.127779 387.127779 0 H(21)C(15)N(3)O(9)'
mod_dict['GluGluGlu[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 387.127779 387.127779 0 H(21)C(15)N(3)O(9)'
mod_dict['GluGluGluGlu[E]'] = 'E NORMAL 516.170373 516.170373 0 H(28)C(20)N(4)O(12)'
mod_dict['GluGluGluGlu[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 516.170373 516.170373 0 H(28)C(20)N(4)O(12)'
mod_dict['Gluconoylation[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 178.047738 178.047738 0 H(10)C(6)O(6)'
mod_dict['Gluconoylation[K]'] = 'K NORMAL 178.047738 178.047738 0 H(10)C(6)O(6)'
mod_dict['Glucosylgalactosyl[K]'] = 'K NORMAL 340.100562 340.100562 2 324.105647 324.105647 162.052823 162.052823 O(1)Hex(2)'
mod_dict['Glucuronyl[S]'] = 'S NORMAL 176.032088 176.032088 0 H(8)C(6)O(6)'
mod_dict['Glucuronyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 176.032088 176.032088 0 H(8)C(6)O(6)'
mod_dict['Glutathione[C]'] = 'C NORMAL 305.068156 305.068156 0 H(15)C(10)N(3)O(6)S(1)'
mod_dict['Gly->Ala[G]'] = 'G NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Gly->Arg[G]'] = 'G NORMAL 99.079647 99.079647 0 H(9)C(4)N(3)'
mod_dict['Gly->Asn[G]'] = 'G NORMAL 57.021464 57.021464 0 H(3)C(2)N(1)O(1)'
mod_dict['Gly->Asp[G]'] = 'G NORMAL 58.005479 58.005479 0 H(2)C(2)O(2)'
mod_dict['Gly->Cys[G]'] = 'G NORMAL 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Gly->Gln[G]'] = 'G NORMAL 71.037114 71.037114 0 H(5)C(3)N(1)O(1)'
mod_dict['Gly->Glu[G]'] = 'G NORMAL 72.021129 72.021129 0 H(4)C(3)O(2)'
mod_dict['Gly->His[G]'] = 'G NORMAL 80.037448 80.037448 0 H(4)C(4)N(2)'
mod_dict['Gly->Lys[G]'] = 'G NORMAL 71.073499 71.073499 0 H(9)C(4)N(1)'
mod_dict['Gly->Met[G]'] = 'G NORMAL 74.019021 74.019021 0 H(6)C(3)S(1)'
mod_dict['Gly->Phe[G]'] = 'G NORMAL 90.046950 90.046950 0 H(6)C(7)'
mod_dict['Gly->Pro[G]'] = 'G NORMAL 40.031300 40.031300 0 H(4)C(3)'
mod_dict['Gly->Ser[G]'] = 'G NORMAL 30.010565 30.010565 0 H(2)C(1)O(1)'
mod_dict['Gly->Thr[G]'] = 'G NORMAL 44.026215 44.026215 0 H(4)C(2)O(1)'
mod_dict['Gly->Trp[G]'] = 'G NORMAL 129.057849 129.057849 0 H(7)C(9)N(1)'
mod_dict['Gly->Tyr[G]'] = 'G NORMAL 106.041865 106.041865 0 H(6)C(7)O(1)'
mod_dict['Gly->Val[G]'] = 'G NORMAL 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Gly->Xle[G]'] = 'G NORMAL 56.062600 56.062600 0 H(8)C(4)'
mod_dict['Gly-loss+Amide[AnyC-termG]'] = 'G PEP_C -58.005479 -58.005479 0 H(-2)C(-2)O(-2)'
mod_dict['GlyGly[S]'] = 'S NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['GlyGly[T]'] = 'T NORMAL 114.042927 114.042927 0 H(6)C(4)N(2)O(2)'
mod_dict['Glycerophospho[S]'] = 'S NORMAL 154.003110 154.003110 0 H(7)C(3)O(5)P(1)'
mod_dict['GlycerylPE[E]'] = 'E NORMAL 197.045310 197.045310 0 H(12)C(5)N(1)O(5)P(1)'
mod_dict['Glycosyl[P]'] = 'P NORMAL 148.037173 148.037173 0 H(8)C(5)O(5)'
mod_dict['Guanidinyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 42.021798 42.021798 0 H(2)C(1)N(2)'
mod_dict['Guanidinyl[K]'] = 'K NORMAL 42.021798 42.021798 0 H(2)C(1)N(2)'
mod_dict['HCysThiolactone[K]'] = 'K NORMAL 117.024835 117.024835 0 H(7)C(4)N(1)O(1)S(1)'
mod_dict['HCysteinyl[C]'] = 'C NORMAL 133.019749 133.019749 0 H(7)C(4)N(1)O(2)S(1)'
mod_dict['HMVK[C]'] = 'C NORMAL 86.036779 86.036779 0 H(6)C(4)O(2)'
mod_dict['HN2_mustard[C]'] = 'C NORMAL 101.084064 101.084064 0 H(11)C(5)N(1)O(1)'
mod_dict['HN2_mustard[H]'] = 'H NORMAL 101.084064 101.084064 0 H(11)C(5)N(1)O(1)'
mod_dict['HN2_mustard[K]'] = 'K NORMAL 101.084064 101.084064 0 H(11)C(5)N(1)O(1)'
mod_dict['HN3_mustard[C]'] = 'C NORMAL 131.094629 131.094629 0 H(13)C(6)N(1)O(2)'
mod_dict['HN3_mustard[H]'] = 'H NORMAL 131.094629 131.094629 0 H(13)C(6)N(1)O(2)'
mod_dict['HN3_mustard[K]'] = 'K NORMAL 131.094629 131.094629 0 H(13)C(6)N(1)O(2)'
mod_dict['HNE+Delta_H(2)[C]'] = 'C NORMAL 158.130680 158.130680 0 H(18)C(9)O(2)'
mod_dict['HNE+Delta_H(2)[H]'] = 'H NORMAL 158.130680 158.130680 0 H(18)C(9)O(2)'
mod_dict['HNE+Delta_H(2)[K]'] = 'K NORMAL 158.130680 158.130680 0 H(18)C(9)O(2)'
mod_dict['HNE-BAHAH[C]'] = 'C NORMAL 511.319226 511.319226 0 H(45)C(25)N(5)O(4)S(1)'
mod_dict['HNE-BAHAH[H]'] = 'H NORMAL 511.319226 511.319226 0 H(45)C(25)N(5)O(4)S(1)'
mod_dict['HNE-BAHAH[K]'] = 'K NORMAL 511.319226 511.319226 0 H(45)C(25)N(5)O(4)S(1)'
mod_dict['HNE-Delta_H(2)O[C]'] = 'C NORMAL 138.104465 138.104465 0 H(14)C(9)O(1)'
mod_dict['HNE-Delta_H(2)O[H]'] = 'H NORMAL 138.104465 138.104465 0 H(14)C(9)O(1)'
mod_dict['HNE-Delta_H(2)O[K]'] = 'K NORMAL 138.104465 138.104465 0 H(14)C(9)O(1)'
mod_dict['HNE[A]'] = 'A NORMAL 156.115030 156.115030 0 H(16)C(9)O(2)'
mod_dict['HNE[C]'] = 'C NORMAL 156.115030 156.115030 0 H(16)C(9)O(2)'
mod_dict['HNE[H]'] = 'H NORMAL 156.115030 156.115030 0 H(16)C(9)O(2)'
mod_dict['HNE[K]'] = 'K NORMAL 156.115030 156.115030 0 H(16)C(9)O(2)'
mod_dict['HNE[L]'] = 'L NORMAL 156.115030 156.115030 0 H(16)C(9)O(2)'
mod_dict['HPG[R]'] = 'R NORMAL 132.021129 132.021129 0 H(4)C(8)O(2)'
mod_dict['Heme[C]'] = 'C NORMAL 616.177295 616.177295 0 H(32)C(34)N(4)O(4)Fe(1)'
mod_dict['Heme[H]'] = 'H NORMAL 616.177295 616.177295 0 H(32)C(34)N(4)O(4)Fe(1)'
mod_dict['Hep[K]'] = 'K NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hep[N]'] = 'N NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hep[Q]'] = 'Q NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hep[R]'] = 'R NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hep[S]'] = 'S NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hep[T]'] = 'T NORMAL 192.063388 192.063388 0 Hep(1)'
mod_dict['Hex(1)HexNAc(1)NeuAc(1)[N]'] = 'N NORMAL 656.227613 656.227613 0 Hex(1)HexNAc(1)NeuAc(1)'
mod_dict['Hex(1)HexNAc(1)NeuAc(1)[S]'] = 'S NORMAL 656.227613 656.227613 0 Hex(1)HexNAc(1)NeuAc(1)'
mod_dict['Hex(1)HexNAc(1)NeuAc(1)[T]'] = 'T NORMAL 656.227613 656.227613 0 Hex(1)HexNAc(1)NeuAc(1)'
mod_dict['Hex(1)HexNAc(1)NeuAc(2)[N]'] = 'N NORMAL 947.323029 947.323029 0 Hex(1)HexNAc(1)NeuAc(2)'
mod_dict['Hex(1)HexNAc(1)NeuAc(2)[S]'] = 'S NORMAL 947.323029 947.323029 0 Hex(1)HexNAc(1)NeuAc(2)'
mod_dict['Hex(1)HexNAc(1)NeuAc(2)[T]'] = 'T NORMAL 947.323029 947.323029 0 Hex(1)HexNAc(1)NeuAc(2)'
mod_dict['Hex(1)HexNAc(1)dHex(1)[N]'] = 'N NORMAL 511.190105 511.190105 0 dHex(1)Hex(1)HexNAc(1)'
mod_dict['Hex(1)HexNAc(2)[N]'] = 'N NORMAL 568.211569 568.211569 0 Hex(1)HexNAc(2)'
mod_dict['Hex(1)HexNAc(2)Pent(1)[N]'] = 'N NORMAL 700.253828 700.253828 0 Pent(1)Hex(1)HexNAc(2)'
mod_dict['Hex(1)HexNAc(2)dHex(1)[N]'] = 'N NORMAL 714.269478 714.269478 0 dHex(1)Hex(1)HexNAc(2)'
mod_dict['Hex(1)HexNAc(2)dHex(1)Pent(1)[N]'] = 'N NORMAL 846.311736 846.311736 0 Pent(1)dHex(1)Hex(1)HexNAc(2)'
mod_dict['Hex(1)HexNAc(2)dHex(2)[N]'] = 'N NORMAL 860.327386 860.327386 0 dHex(2)Hex(1)HexNAc(2)'
mod_dict['Hex(2)[K]'] = 'K NORMAL 324.105647 324.105647 0 Hex(2)'
mod_dict['Hex(2)[R]'] = 'R NORMAL 324.105647 324.105647 0 Hex(2)'
mod_dict['Hex(2)HexNAc(2)[N]'] = 'N NORMAL 730.264392 730.264392 0 Hex(2)HexNAc(2)'
mod_dict['Hex(2)HexNAc(2)Pent(1)[N]'] = 'N NORMAL 862.306651 862.306651 0 Pent(1)Hex(2)HexNAc(2)'
mod_dict['Hex(2)HexNAc(2)dHex(1)[N]'] = 'N NORMAL 876.322301 876.322301 0 dHex(1)Hex(2)HexNAc(2)'
mod_dict['Hex(3)[N]'] = 'N NORMAL 486.158471 486.158471 0 Hex(3)'
mod_dict['Hex(3)HexNAc(1)Pent(1)[N]'] = 'N NORMAL 821.280102 821.280102 0 Pent(1)Hex(3)HexNAc(1)'
mod_dict['Hex(3)HexNAc(2)[N]'] = 'N NORMAL 892.317216 892.317216 0 Hex(3)HexNAc(2)'
mod_dict['Hex(3)HexNAc(2)P(1)[N]'] = 'N NORMAL 923.290978 923.290978 0 P(1)Hex(3)HexNAc(2)'
mod_dict['Hex(3)HexNAc(4)[N]'] = 'N NORMAL 1298.475961 1298.475961 0 Hex(3)HexNAc(4)'
mod_dict['Hex(4)HexNAc(4)[N]'] = 'N NORMAL 1460.528784 1460.528784 0 Hex(4)HexNAc(4)'
mod_dict['Hex(5)HexNAc(2)[N]'] = 'N NORMAL 1216.422863 1216.422863 0 Hex(5)HexNAc(2)'
mod_dict['Hex(5)HexNAc(4)[N]'] = 'N NORMAL 1622.581608 1622.581608 0 Hex(5)HexNAc(4)'
mod_dict['Hex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[C]'] = 'C NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[K]'] = 'K NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[N]'] = 'N NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[R]'] = 'R NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[S]'] = 'S NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[T]'] = 'T NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[W]'] = 'W NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex[Y]'] = 'Y NORMAL 162.052824 162.052824 0 Hex(1)'
mod_dict['Hex1HexNAc1[N]'] = 'N NORMAL 365.132196 365.132196 0 Hex(1)HexNAc(1)'
mod_dict['Hex1HexNAc1[S]'] = 'S NORMAL 365.132196 365.132196 0 Hex(1)HexNAc(1)'
mod_dict['Hex1HexNAc1[T]'] = 'T NORMAL 365.132196 365.132196 0 Hex(1)HexNAc(1)'
mod_dict['HexN[K]'] = 'K NORMAL 161.068808 161.068808 0 H(11)C(6)N(1)O(4)'
mod_dict['HexN[N]'] = 'N NORMAL 161.068808 161.068808 0 H(11)C(6)N(1)O(4)'
mod_dict['HexN[T]'] = 'T NORMAL 161.068808 161.068808 0 H(11)C(6)N(1)O(4)'
mod_dict['HexN[W]'] = 'W NORMAL 161.068808 161.068808 0 H(11)C(6)N(1)O(4)'
mod_dict['HexNAc(1)dHex(1)[N]'] = 'N NORMAL 349.137281 349.137281 0 dHex(1)HexNAc(1)'
mod_dict['HexNAc(1)dHex(2)[N]'] = 'N NORMAL 495.195190 495.195190 0 dHex(2)HexNAc(1)'
mod_dict['HexNAc(2)[N]'] = 'N NORMAL 406.158745 406.158745 0 HexNAc(2)'
mod_dict['HexNAc(2)dHex(1)[N]'] = 'N NORMAL 552.216654 552.216654 0 dHex(1)HexNAc(2)'
mod_dict['HexNAc(2)dHex(2)[N]'] = 'N NORMAL 698.274563 698.274563 0 dHex(2)HexNAc(2)'
mod_dict['HexNAc[N]'] = 'N NORMAL 203.079373 203.079373 0 HexNAc(1)'
mod_dict['HexNAc[S]'] = 'S NORMAL 203.079373 203.079373 0 HexNAc(1)'
mod_dict['HexNAc[T]'] = 'T NORMAL 203.079373 203.079373 0 HexNAc(1)'
mod_dict['His->Ala[H]'] = 'H NORMAL -66.021798 -66.021798 0 H(-2)C(-3)N(-2)'
mod_dict['His->Arg[H]'] = 'H NORMAL 19.042199 19.042199 0 H(5)N(1)'
mod_dict['His->Asn[H]'] = 'H NORMAL -23.015984 -23.015984 0 H(-1)C(-2)N(-1)O(1)'
mod_dict['His->Asp[H]'] = 'H NORMAL -22.031969 -22.031969 0 H(-2)C(-2)N(-2)O(2)'
mod_dict['His->Cys[H]'] = 'H NORMAL -34.049727 -34.049727 0 H(-2)C(-3)N(-2)S(1)'
mod_dict['His->Gln[H]'] = 'H NORMAL -9.000334 -9.000334 0 H(1)C(-1)N(-1)O(1)'
mod_dict['His->Glu[H]'] = 'H NORMAL -8.016319 -8.016319 0 C(-1)N(-2)O(2)'
mod_dict['His->Gly[H]'] = 'H NORMAL -80.037448 -80.037448 0 H(-4)C(-4)N(-2)'
mod_dict['His->Lys[H]'] = 'H NORMAL -8.963949 -8.963949 0 H(5)N(-1)'
mod_dict['His->Met[H]'] = 'H NORMAL -6.018427 -6.018427 0 H(2)C(-1)N(-2)S(1)'
mod_dict['His->Phe[H]'] = 'H NORMAL 10.009502 10.009502 0 H(2)C(3)N(-2)'
mod_dict['His->Pro[H]'] = 'H NORMAL -40.006148 -40.006148 0 C(-1)N(-2)'
mod_dict['His->Ser[H]'] = 'H NORMAL -50.026883 -50.026883 0 H(-2)C(-3)N(-2)O(1)'
mod_dict['His->Thr[H]'] = 'H NORMAL -36.011233 -36.011233 0 C(-2)N(-2)O(1)'
mod_dict['His->Trp[H]'] = 'H NORMAL 49.020401 49.020401 0 H(3)C(5)N(-1)'
mod_dict['His->Tyr[H]'] = 'H NORMAL 26.004417 26.004417 0 H(2)C(3)N(-2)O(1)'
mod_dict['His->Val[H]'] = 'H NORMAL -37.990498 -37.990498 0 H(2)C(-1)N(-2)'
mod_dict['His->Xle[H]'] = 'H NORMAL -23.974848 -23.974848 0 H(4)N(-2)'
mod_dict['Homocysteic_acid[M]'] = 'M NORMAL 33.969094 33.969094 0 H(-2)C(-1)O(3)'
mod_dict['Hydroxamic_acid[D]'] = 'D NORMAL 15.010899 15.010899 0 H(1)N(1)'
mod_dict['Hydroxamic_acid[E]'] = 'E NORMAL 15.010899 15.010899 0 H(1)N(1)'
mod_dict['Hydroxycinnamyl[C]'] = 'C NORMAL 146.036779 146.036779 0 H(6)C(9)O(2)'
mod_dict['Hydroxyfarnesyl[C]'] = 'C NORMAL 220.182715 220.182715 0 H(24)C(15)O(1)'
mod_dict['Hydroxyheme[E]'] = 'E NORMAL 614.161645 614.161645 0 H(30)C(34)N(4)O(4)Fe(1)'
mod_dict['Hydroxymethyl[N]'] = 'N NORMAL 30.010565 30.010565 0 H(2)C(1)O(1)'
mod_dict['HydroxymethylOP[K]'] = 'K NORMAL 108.021129 108.021129 0 H(4)C(6)O(2)'
mod_dict['Hydroxytrimethyl[K]'] = 'K NORMAL 59.049690 59.049690 0 H(7)C(3)O(1)'
mod_dict['Hypusine[K]'] = 'K NORMAL 87.068414 87.068414 0 H(9)C(4)N(1)O(1)'
mod_dict['IBTP[C]'] = 'C NORMAL 316.138088 316.138088 0 H(21)C(22)P(1)'
mod_dict['ICAT-C[C]'] = 'C NORMAL 227.126991 227.126991 0 H(17)C(10)N(3)O(3)'
mod_dict['ICAT-C_13C(9)[C]'] = 'C NORMAL 236.157185 236.157185 0 H(17)C(1)13C(9)N(3)O(3)'
mod_dict['ICAT-D[C]'] = 'C NORMAL 442.224991 442.224991 0 H(34)C(20)N(4)O(5)S(1)'
mod_dict['ICAT-D_2H(8)[C]'] = 'C NORMAL 450.275205 450.275205 0 H(26)2H(8)C(20)N(4)O(5)S(1)'
mod_dict['ICAT-G[C]'] = 'C NORMAL 486.251206 486.251206 0 H(38)C(22)N(4)O(6)S(1)'
mod_dict['ICAT-G_2H(8)[C]'] = 'C NORMAL 494.301420 494.301420 0 H(30)2H(8)C(22)N(4)O(6)S(1)'
mod_dict['ICAT-H[C]'] = 'C NORMAL 345.097915 345.097915 0 H(20)C(15)N(1)O(6)Cl(1)'
mod_dict['ICAT-H_13C(6)[C]'] = 'C NORMAL 351.118044 351.118044 0 H(20)C(9)13C(6)N(1)O(6)Cl(1)'
mod_dict['ICDID[C]'] = 'C NORMAL 138.068080 138.068080 0 H(10)C(8)O(2)'
mod_dict['ICDID_2H(6)[C]'] = 'C NORMAL 144.105740 144.105740 0 H(4)2H(6)C(8)O(2)'
mod_dict['ICPL[K]'] = 'K NORMAL 105.021464 105.021464 0 H(3)C(6)N(1)O(1)'
mod_dict['ICPL[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 105.021464 105.021464 0 H(3)C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 111.041593 111.041593 0 H(3)13C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)[K]'] = 'K NORMAL 111.041593 111.041593 0 H(3)13C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 111.041593 111.041593 0 H(3)13C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)2H(4)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 115.066700 115.066700 0 H(-1)2H(4)13C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)2H(4)[K]'] = 'K NORMAL 115.066700 115.066700 0 H(-1)2H(4)13C(6)N(1)O(1)'
mod_dict['ICPL_13C(6)2H(4)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 115.066700 115.066700 0 H(-1)2H(4)13C(6)N(1)O(1)'
mod_dict['ICPL_2H(4)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 109.046571 109.046571 0 H(-1)2H(4)C(6)N(1)O(1)'
mod_dict['ICPL_2H(4)[K]'] = 'K NORMAL 109.046571 109.046571 0 H(-1)2H(4)C(6)N(1)O(1)'
mod_dict['ICPL_2H(4)[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 109.046571 109.046571 0 H(-1)2H(4)C(6)N(1)O(1)'
mod_dict['IDEnT[C]'] = 'C NORMAL 214.990469 214.990469 0 H(7)C(9)N(1)O(1)Cl(2)'
mod_dict['IED-Biotin[C]'] = 'C NORMAL 326.141261 326.141261 0 H(22)C(14)N(4)O(3)S(1)'
mod_dict['IGBP[C]'] = 'C NORMAL 296.016039 296.016039 0 H(13)C(12)N(2)O(2)Br(1)'
mod_dict['IGBP_13C(2)[C]'] = 'C NORMAL 298.022748 298.022748 0 H(13)C(10)13C(2)N(2)O(2)Br(1)'
mod_dict['IMEHex(2)NeuAc[K]'] = 'K NORMAL 688.199683 688.199683 0 H(3)C(2)N(1)S(1)Hex(2)NeuAc(1)'
mod_dict['IMID[K]'] = 'K NORMAL 68.037448 68.037448 0 H(4)C(3)N(2)'
mod_dict['IMID_2H(4)[K]'] = 'K NORMAL 72.062555 72.062555 0 2H(4)C(3)N(2)'
mod_dict['ISD_z+2_ion[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N -15.010899 -15.010899 0 H(-1)N(-1)'
mod_dict['Iminobiotin[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 225.093583 225.093583 0 H(15)C(10)N(3)O(1)S(1)'
mod_dict['Iminobiotin[K]'] = 'K NORMAL 225.093583 225.093583 0 H(15)C(10)N(3)O(1)S(1)'
mod_dict['Iodo[H]'] = 'H NORMAL 125.896648 125.896648 0 H(-1)I(1)'
mod_dict['Iodo[Y]'] = 'Y NORMAL 125.896648 125.896648 0 H(-1)I(1)'
mod_dict['IodoU-AMP[F]'] = 'F NORMAL 322.020217 322.020217 0 H(11)C(9)N(2)O(9)P(1)'
mod_dict['IodoU-AMP[W]'] = 'W NORMAL 322.020217 322.020217 0 H(11)C(9)N(2)O(9)P(1)'
mod_dict['IodoU-AMP[Y]'] = 'Y NORMAL 322.020217 322.020217 0 H(11)C(9)N(2)O(9)P(1)'
mod_dict['Iodoacetanilide[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 133.052764 133.052764 0 H(7)C(8)N(1)O(1)'
mod_dict['Iodoacetanilide[C]'] = 'C NORMAL 133.052764 133.052764 0 H(7)C(8)N(1)O(1)'
mod_dict['Iodoacetanilide[K]'] = 'K NORMAL 133.052764 133.052764 0 H(7)C(8)N(1)O(1)'
mod_dict['Iodoacetanilide_13C(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 139.072893 139.072893 0 H(7)C(2)13C(6)N(1)O(1)'
mod_dict['Iodoacetanilide_13C(6)[C]'] = 'C NORMAL 139.072893 139.072893 0 H(7)C(2)13C(6)N(1)O(1)'
mod_dict['Iodoacetanilide_13C(6)[K]'] = 'K NORMAL 139.072893 139.072893 0 H(7)C(2)13C(6)N(1)O(1)'
mod_dict['Isopropylphospho[S]'] = 'S NORMAL 122.013281 122.013281 0 H(7)C(3)O(3)P(1)'
mod_dict['Isopropylphospho[T]'] = 'T NORMAL 122.013281 122.013281 0 H(7)C(3)O(3)P(1)'
mod_dict['Isopropylphospho[Y]'] = 'Y NORMAL 122.013281 122.013281 0 H(7)C(3)O(3)P(1)'
mod_dict['LG-Hlactam-K[K]'] = 'K NORMAL 348.193674 348.193674 0 H(28)C(20)O(5)'
mod_dict['LG-Hlactam-K[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 348.193674 348.193674 0 H(28)C(20)O(5)'
mod_dict['LG-Hlactam-R[R]'] = 'R NORMAL 306.171876 306.171876 0 H(26)C(19)N(-2)O(5)'
mod_dict['LG-anhydrolactam[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 314.188195 314.188195 0 H(26)C(20)O(3)'
mod_dict['LG-anhydrolactam[K]'] = 'K NORMAL 314.188195 314.188195 0 H(26)C(20)O(3)'
mod_dict['LG-anhyropyrrole[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 298.193280 298.193280 0 H(26)C(20)O(2)'
mod_dict['LG-anhyropyrrole[K]'] = 'K NORMAL 298.193280 298.193280 0 H(26)C(20)O(2)'
mod_dict['LG-lactam-K[K]'] = 'K NORMAL 332.198760 332.198760 0 H(28)C(20)O(4)'
mod_dict['LG-lactam-K[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 332.198760 332.198760 0 H(28)C(20)O(4)'
mod_dict['LG-lactam-R[R]'] = 'R NORMAL 290.176961 290.176961 0 H(26)C(19)N(-2)O(4)'
mod_dict['LG-pyrrole[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 316.203845 316.203845 0 H(28)C(20)O(3)'
mod_dict['LG-pyrrole[K]'] = 'K NORMAL 316.203845 316.203845 0 H(28)C(20)O(3)'
mod_dict['Label_13C(1)2H(3)+Oxidation[M]'] = 'M NORMAL 20.017100 20.017100 0 H(-3)2H(3)C(-1)13C(1)O(1)'
mod_dict['Label_13C(1)2H(3)[M]'] = 'M NORMAL 4.022185 4.022185 0 H(-3)2H(3)C(-1)13C(1)'
mod_dict['Label_13C(3)[A]'] = 'A NORMAL 3.010064 3.010064 0 C(-3)13C(3)'
mod_dict['Label_13C(3)15N(1)[A]'] = 'A NORMAL 4.007099 4.007099 0 C(-3)13C(3)N(-1)15N(1)'
mod_dict['Label_13C(4)+Oxidation[M]'] = 'M NORMAL 20.008334 20.008334 0 C(-4)13C(4)O(1)'
mod_dict['Label_13C(4)[M]'] = 'M NORMAL 4.013419 4.013419 0 C(-4)13C(4)'
mod_dict['Label_13C(4)15N(1)[D]'] = 'D NORMAL 5.010454 5.010454 0 C(-4)13C(4)N(-1)15N(1)'
mod_dict['Label_13C(4)15N(2)+GlyGly[K]'] = 'K NORMAL 120.050417 120.050417 0 H(6)13C(4)15N(2)O(2)'
mod_dict['Label_13C(5)[P]'] = 'P NORMAL 5.016774 5.016774 0 C(-5)13C(5)'
mod_dict['Label_13C(5)15N(1)[E]'] = 'E NORMAL 6.013809 6.013809 0 C(-5)13C(5)N(-1)15N(1)'
mod_dict['Label_13C(5)15N(1)[M]'] = 'M NORMAL 6.013809 6.013809 0 C(-5)13C(5)N(-1)15N(1)'
mod_dict['Label_13C(5)15N(1)[P]'] = 'P NORMAL 6.013809 6.013809 0 C(-5)13C(5)N(-1)15N(1)'
mod_dict['Label_13C(5)15N(1)[V]'] = 'V NORMAL 6.013809 6.013809 0 C(-5)13C(5)N(-1)15N(1)'
mod_dict['Label_13C(6)+Acetyl[K]'] = 'K NORMAL 48.030694 48.030694 0 H(2)C(-4)13C(6)O(1)'
mod_dict['Label_13C(6)+Dimethyl[K]'] = 'K NORMAL 34.051429 34.051429 0 H(4)C(-4)13C(6)'
mod_dict['Label_13C(6)+GlyGly[K]'] = 'K NORMAL 120.063056 120.063056 0 H(6)C(-2)13C(6)N(2)O(2)'
mod_dict['Label_13C(6)[I]'] = 'I NORMAL 6.020129 6.020129 0 C(-6)13C(6)'
mod_dict['Label_13C(6)[K]'] = 'K NORMAL 6.020129 6.020129 0 C(-6)13C(6)'
mod_dict['Label_13C(6)[L]'] = 'L NORMAL 6.020129 6.020129 0 C(-6)13C(6)'
mod_dict['Label_13C(6)[R]'] = 'R NORMAL 6.020129 6.020129 0 C(-6)13C(6)'
mod_dict['Label_13C(6)15N(1)[I]'] = 'I NORMAL 7.017164 7.017164 0 C(-6)13C(6)N(-1)15N(1)'
mod_dict['Label_13C(6)15N(1)[L]'] = 'L NORMAL 7.017164 7.017164 0 C(-6)13C(6)N(-1)15N(1)'
mod_dict['Label_13C(6)15N(2)+Acetyl[K]'] = 'K NORMAL 50.024764 50.024764 0 H(2)C(-4)13C(6)N(-2)15N(2)O(1)'
mod_dict['Label_13C(6)15N(2)+Dimethyl[K]'] = 'K NORMAL 36.045499 36.045499 0 H(4)C(-4)13C(6)N(-2)15N(2)'
mod_dict['Label_13C(6)15N(2)+GlyGly[K]'] = 'K NORMAL 122.057126 122.057126 0 H(6)C(-2)13C(6)15N(2)O(2)'
mod_dict['Label_13C(6)15N(2)[K]'] = 'K NORMAL 8.014199 8.014199 0 C(-6)13C(6)N(-2)15N(2)'
mod_dict['Label_13C(6)15N(2)[L]'] = 'L NORMAL 8.014199 8.014199 0 C(-6)13C(6)N(-2)15N(2)'
mod_dict['Label_13C(6)15N(4)+Dimethyl[R]'] = 'R NORMAL 38.039569 38.039569 0 H(4)C(-4)13C(6)N(-4)15N(4)'
mod_dict['Label_13C(6)15N(4)+Dimethyl_2H(6)13C(2)[R]'] = 'R NORMAL 46.083939 46.083939 0 H(-2)2H(6)C(-6)13C(8)N(-4)15N(4)'
mod_dict['Label_13C(6)15N(4)+Methyl[R]'] = 'R NORMAL 24.023919 24.023919 0 H(2)C(-5)13C(6)N(-4)15N(4)'
mod_dict['Label_13C(6)15N(4)+Methyl_2H(3)13C(1)[R]'] = 'R NORMAL 28.046104 28.046104 0 H(-1)2H(3)C(-6)13C(7)N(-4)15N(4)'
mod_dict['Label_13C(6)15N(4)[R]'] = 'R NORMAL 10.008269 10.008269 0 C(-6)13C(6)N(-4)15N(4)'
mod_dict['Label_13C(8)15N(2)[R]'] = 'R NORMAL 10.020909 10.020909 0 C(-8)13C(8)N(-2)15N(2)'
mod_dict['Label_13C(9)+Phospho[Y]'] = 'Y NORMAL 88.996524 88.996524 0 H(1)C(-9)13C(9)O(3)P(1)'
mod_dict['Label_13C(9)[F]'] = 'F NORMAL 9.030193 9.030193 0 C(-9)13C(9)'
mod_dict['Label_13C(9)[Y]'] = 'Y NORMAL 9.030193 9.030193 0 C(-9)13C(9)'
mod_dict['Label_13C(9)15N(1)[F]'] = 'F NORMAL 10.027228 10.027228 0 C(-9)13C(9)N(-1)15N(1)'
mod_dict['Label_15N(1)[A]'] = 'A NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[C]'] = 'C NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[D]'] = 'D NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[E]'] = 'E NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[F]'] = 'F NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[G]'] = 'G NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[I]'] = 'I NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[L]'] = 'L NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[M]'] = 'M NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[P]'] = 'P NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[S]'] = 'S NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[T]'] = 'T NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[V]'] = 'V NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(1)[Y]'] = 'Y NORMAL 0.997035 0.997035 0 N(-1)15N(1)'
mod_dict['Label_15N(2)[K]'] = 'K NORMAL 1.994070 1.994070 0 N(-2)15N(2)'
mod_dict['Label_15N(2)[N]'] = 'N NORMAL 1.994070 1.994070 0 N(-2)15N(2)'
mod_dict['Label_15N(2)[Q]'] = 'Q NORMAL 1.994070 1.994070 0 N(-2)15N(2)'
mod_dict['Label_15N(2)[W]'] = 'W NORMAL 1.994070 1.994070 0 N(-2)15N(2)'
mod_dict['Label_15N(2)2H(9)[K]'] = 'K NORMAL 11.050561 11.050561 0 H(-9)2H(9)N(-2)15N(2)'
mod_dict['Label_15N(3)[H]'] = 'H NORMAL 2.991105 2.991105 0 N(-3)15N(3)'
mod_dict['Label_15N(4)[R]'] = 'R NORMAL 3.988140 3.988140 0 N(-4)15N(4)'
mod_dict['Label_18O(1)[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 2.004246 2.004246 0 O(-1)18O(1)'
mod_dict['Label_18O(1)[S]'] = 'S NORMAL 2.004246 2.004246 0 O(-1)18O(1)'
mod_dict['Label_18O(1)[T]'] = 'T NORMAL 2.004246 2.004246 0 O(-1)18O(1)'
mod_dict['Label_18O(1)[Y]'] = 'Y NORMAL 2.004246 2.004246 0 O(-1)18O(1)'
mod_dict['Label_18O(2)[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 4.008491 4.008491 0 O(-2)18O(2)'
mod_dict['Label_2H(10)[L]'] = 'L NORMAL 10.062767 10.062767 0 H(-10)2H(10)'
mod_dict['Label_2H(3)+Oxidation[M]'] = 'M NORMAL 19.013745 19.013745 0 H(-3)2H(3)O(1)'
mod_dict['Label_2H(3)[L]'] = 'L NORMAL 3.018830 3.018830 0 H(-3)2H(3)'
mod_dict['Label_2H(3)[M]'] = 'M NORMAL 3.018830 3.018830 0 H(-3)2H(3)'
mod_dict['Label_2H(4)+Acetyl[K]'] = 'K NORMAL 46.035672 46.035672 0 H(-2)2H(4)C(2)O(1)'
mod_dict['Label_2H(4)+GlyGly[K]'] = 'K NORMAL 118.068034 118.068034 0 H(2)2H(4)C(4)N(2)O(2)'
mod_dict['Label_2H(4)[F]'] = 'F NORMAL 4.025107 4.025107 0 H(-4)2H(4)'
mod_dict['Label_2H(4)[K]'] = 'K NORMAL 4.025107 4.025107 0 H(-4)2H(4)'
mod_dict['Label_2H(4)[Y]'] = 'Y NORMAL 4.025107 4.025107 0 H(-4)2H(4)'
mod_dict['Label_2H(4)13C(1)[R]'] = 'R NORMAL 5.028462 5.028462 0 H(-4)2H(4)C(-1)13C(1)'
mod_dict['Label_2H(9)13C(6)15N(2)[K]'] = 'K NORMAL 17.070690 17.070690 0 H(-9)2H(9)C(-6)13C(6)N(-2)15N(2)'
mod_dict['Leu->MetOx[L]'] = 'L NORMAL 33.951335 33.951335 0 H(-2)C(-1)O(1)S(1)'
mod_dict['LeuArgGlyGly[K]'] = 'K NORMAL 383.228103 383.228103 0 H(29)C(16)N(7)O(4)'
mod_dict['Lipoyl[K]'] = 'K NORMAL 188.032956 188.032956 0 H(12)C(8)O(1)S(2)'
mod_dict['Lys->Ala[K]'] = 'K NORMAL -57.057849 -57.057849 0 H(-7)C(-3)N(-1)'
mod_dict['Lys->Allysine[K]'] = 'K NORMAL -1.031634 -1.031634 0 H(-3)N(-1)O(1)'
mod_dict['Lys->AminoadipicAcid[K]'] = 'K NORMAL 14.963280 14.963280 0 H(-3)N(-1)O(2)'
mod_dict['Lys->Arg[K]'] = 'K NORMAL 28.006148 28.006148 0 N(2)'
mod_dict['Lys->Asn[K]'] = 'K NORMAL -14.052036 -14.052036 0 H(-6)C(-2)O(1)'
mod_dict['Lys->Asp[K]'] = 'K NORMAL -13.068020 -13.068020 0 H(-7)C(-2)N(-1)O(2)'
mod_dict['Lys->CamCys[K]'] = 'K NORMAL 31.935685 31.935685 0 H(-4)C(-1)O(1)S(1)'
mod_dict['Lys->Cys[K]'] = 'K NORMAL -25.085779 -25.085779 0 H(-7)C(-3)N(-1)S(1)'
mod_dict['Lys->Gln[K]'] = 'K NORMAL -0.036386 -0.036386 0 H(-4)C(-1)O(1)'
mod_dict['Lys->Glu[K]'] = 'K NORMAL 0.947630 0.947630 0 H(-5)C(-1)N(-1)O(2)'
mod_dict['Lys->Gly[K]'] = 'K NORMAL -71.073499 -71.073499 0 H(-9)C(-4)N(-1)'
mod_dict['Lys->His[K]'] = 'K NORMAL 8.963949 8.963949 0 H(-5)N(1)'
mod_dict['Lys->Met[K]'] = 'K NORMAL 2.945522 2.945522 0 H(-3)C(-1)N(-1)S(1)'
mod_dict['Lys->MetOx[K]'] = 'K NORMAL 18.940436 18.940436 0 H(-3)C(-1)N(-1)O(1)S(1)'
mod_dict['Lys->Phe[K]'] = 'K NORMAL 18.973451 18.973451 0 H(-3)C(3)N(-1)'
mod_dict['Lys->Pro[K]'] = 'K NORMAL -31.042199 -31.042199 0 H(-5)C(-1)N(-1)'
mod_dict['Lys->Ser[K]'] = 'K NORMAL -41.062935 -41.062935 0 H(-7)C(-3)N(-1)O(1)'
mod_dict['Lys->Thr[K]'] = 'K NORMAL -27.047285 -27.047285 0 H(-5)C(-2)N(-1)O(1)'
mod_dict['Lys->Trp[K]'] = 'K NORMAL 57.984350 57.984350 0 H(-2)C(5)'
mod_dict['Lys->Tyr[K]'] = 'K NORMAL 34.968366 34.968366 0 H(-3)C(3)N(-1)O(1)'
mod_dict['Lys->Val[K]'] = 'K NORMAL -29.026549 -29.026549 0 H(-3)C(-1)N(-1)'
mod_dict['Lys->Xle[K]'] = 'K NORMAL -15.010899 -15.010899 0 H(-1)N(-1)'
mod_dict['Lys-loss[ProteinC-termK]'] = 'K PRO_C -128.094963 -128.094963 0 H(-12)C(-6)N(-2)O(-1)'
mod_dict['Lys[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 128.094963 128.094963 0 H(12)C(6)N(2)O(1)'
mod_dict['Lysbiotinhydrazide[K]'] = 'K NORMAL 241.088497 241.088497 0 H(15)C(10)N(3)O(2)S(1)'
mod_dict['MDCC[C]'] = 'C NORMAL 383.148121 383.148121 0 H(21)C(20)N(3)O(5)'
mod_dict['MG-H1[R]'] = 'R NORMAL 54.010565 54.010565 0 H(2)C(3)O(1)'
mod_dict['MM-diphenylpentanone[C]'] = 'C NORMAL 265.146664 265.146664 0 H(19)C(18)N(1)O(1)'
mod_dict['MTSL[C]'] = 'C NORMAL 184.079610 184.079610 0 H(14)C(9)N(1)O(1)S(1)'
mod_dict['Maleimide-PEO2-Biotin[C]'] = 'C NORMAL 525.225719 525.225719 0 H(35)C(23)N(5)O(7)S(1)'
mod_dict['Malonyl[C]'] = 'C NORMAL 86.000394 86.000394 0 H(2)C(3)O(3)'
mod_dict['Malonyl[S]'] = 'S NORMAL 86.000394 86.000394 0 H(2)C(3)O(3)'
mod_dict['Menadione-HQ[C]'] = 'C NORMAL 172.052430 172.052430 0 H(8)C(11)O(2)'
mod_dict['Menadione-HQ[K]'] = 'K NORMAL 172.052430 172.052430 0 H(8)C(11)O(2)'
mod_dict['Menadione[C]'] = 'C NORMAL 170.036779 170.036779 0 H(6)C(11)O(2)'
mod_dict['Menadione[K]'] = 'K NORMAL 170.036779 170.036779 0 H(6)C(11)O(2)'
mod_dict['MercaptoEthanol[S]'] = 'S NORMAL 60.003371 60.003371 0 H(4)C(2)S(1)'
mod_dict['MercaptoEthanol[T]'] = 'T NORMAL 60.003371 60.003371 0 H(4)C(2)S(1)'
mod_dict['Met->Aha[M]'] = 'M NORMAL -4.986324 -4.986324 0 H(-3)C(-1)N(3)S(-1)'
mod_dict['Met->Ala[M]'] = 'M NORMAL -60.003371 -60.003371 0 H(-4)C(-2)S(-1)'
mod_dict['Met->Arg[M]'] = 'M NORMAL 25.060626 25.060626 0 H(3)C(1)N(3)S(-1)'
mod_dict['Met->Asn[M]'] = 'M NORMAL -16.997557 -16.997557 0 H(-3)C(-1)N(1)O(1)S(-1)'
mod_dict['Met->Asp[M]'] = 'M NORMAL -16.013542 -16.013542 0 H(-4)C(-1)O(2)S(-1)'
mod_dict['Met->Cys[M]'] = 'M NORMAL -28.031300 -28.031300 0 H(-4)C(-2)'
mod_dict['Met->Gln[M]'] = 'M NORMAL -2.981907 -2.981907 0 H(-1)N(1)O(1)S(-1)'
mod_dict['Met->Glu[M]'] = 'M NORMAL -1.997892 -1.997892 0 H(-2)O(2)S(-1)'
mod_dict['Met->Gly[M]'] = 'M NORMAL -74.019021 -74.019021 0 H(-6)C(-3)S(-1)'
mod_dict['Met->His[M]'] = 'M NORMAL 6.018427 6.018427 0 H(-2)C(1)N(2)S(-1)'
mod_dict['Met->Hpg[M]'] = 'M NORMAL -21.987721 -21.987721 0 H(-2)C(1)S(-1)'
mod_dict['Met->Hse[AnyC-termM]'] = 'M PEP_C -29.992806 -29.992806 0 H(-2)C(-1)O(1)S(-1)'
mod_dict['Met->Hsl[AnyC-termM]'] = 'M PEP_C -48.003371 -48.003371 0 H(-4)C(-1)S(-1)'
mod_dict['Met->Lys[M]'] = 'M NORMAL -2.945522 -2.945522 0 H(3)C(1)N(1)S(-1)'
mod_dict['Met->Phe[M]'] = 'M NORMAL 16.027929 16.027929 0 C(4)S(-1)'
mod_dict['Met->Pro[M]'] = 'M NORMAL -33.987721 -33.987721 0 H(-2)S(-1)'
mod_dict['Met->Ser[M]'] = 'M NORMAL -44.008456 -44.008456 0 H(-4)C(-2)O(1)S(-1)'
mod_dict['Met->Thr[M]'] = 'M NORMAL -29.992806 -29.992806 0 H(-2)C(-1)O(1)S(-1)'
mod_dict['Met->Trp[M]'] = 'M NORMAL 55.038828 55.038828 0 H(1)C(6)N(1)S(-1)'
mod_dict['Met->Tyr[M]'] = 'M NORMAL 32.022844 32.022844 0 C(4)O(1)S(-1)'
mod_dict['Met->Val[M]'] = 'M NORMAL -31.972071 -31.972071 0 S(-1)'
mod_dict['Met->Xle[M]'] = 'M NORMAL -17.956421 -17.956421 0 H(2)C(1)S(-1)'
mod_dict['Met-loss+Acetyl[ProteinN-termM]'] = 'M PRO_N -89.029920 -89.029920 0 H(-7)C(-3)N(-1)S(-1)'
mod_dict['Met-loss[ProteinN-termM]'] = 'M PRO_N -131.040485 -131.040485 0 H(-9)C(-5)N(-1)O(-1)S(-1)'
mod_dict['Methyl+Acetyl_2H(3)[K]'] = 'K NORMAL 59.045045 59.045045 0 H(1)2H(3)C(3)O(1)'
mod_dict['Methyl+Deamidated[N]'] = 'N NORMAL 14.999666 14.999666 0 H(1)C(1)N(-1)O(1)'
# Auto-generated peptide-modification lookup table (Unimod/pFind-style PTM data).
# Key: "<ModName>[<site spec>]" where the bracket part names the target residue
# and/or terminus (e.g. [K], [AnyN-term], [ProteinN-termG]).
# Value: a single space-separated record with the layout:
#   "<residues> <position> <mass1> <mass2> <n_loss> [<loss1> <loss2> ...] <composition>"
#   - residues:    one-letter codes the mod may attach to ("ABC...Z" = any residue)
#   - position:    NORMAL | PEP_N | PEP_C | PRO_N | PRO_C (anywhere / peptide or
#                  protein N-/C-terminus)
#   - mass1/mass2: the mod's mass shift in Da, repeated twice; presumably
#                  monoisotopic and average mass — in this chunk both fields are
#                  always the monoisotopic value, so confirm against the generator
#   - n_loss:      count of neutral-loss entries that follow (0 or 1 here); when 1,
#                  a pair of loss masses precedes the composition (e.g. Phospho[S]
#                  loses 97.976896 = H3PO4, Oxidation[M] loses 63.998285 = CH4OS)
#   - composition: elemental formula; negative counts mean atoms removed, and
#                  isotope-prefixed elements (2H, 13C, 15N) denote heavy labels
# NOTE(review): data appears machine-generated from the Unimod database — do not
# hand-edit individual masses; regenerate from the source instead.
mod_dict['Methyl+Deamidated[Q]'] = 'Q NORMAL 14.999666 14.999666 0 H(1)C(1)N(-1)O(1)'
mod_dict['Methyl-PEO12-Maleimide[C]'] = 'C NORMAL 710.383719 710.383719 0 H(58)C(32)N(2)O(15)'
mod_dict['Methyl[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[C]'] = 'C NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[D]'] = 'D NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[E]'] = 'E NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[H]'] = 'H NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[I]'] = 'I NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[K]'] = 'K NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[L]'] = 'L NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[N]'] = 'N NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[Q]'] = 'Q NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[R]'] = 'R NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[S]'] = 'S NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[T]'] = 'T NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Methyl_2H(2)[K]'] = 'K NORMAL 16.028204 16.028204 0 2H(2)C(1)'
mod_dict['Methyl_2H(3)+Acetyl_2H(3)[K]'] = 'K NORMAL 62.063875 62.063875 0 H(-2)2H(6)C(3)O(1)'
mod_dict['Methyl_2H(3)[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 17.034480 17.034480 0 H(-1)2H(3)C(1)'
mod_dict['Methyl_2H(3)[D]'] = 'D NORMAL 17.034480 17.034480 0 H(-1)2H(3)C(1)'
mod_dict['Methyl_2H(3)[E]'] = 'E NORMAL 17.034480 17.034480 0 H(-1)2H(3)C(1)'
mod_dict['Methyl_2H(3)[K]'] = 'K NORMAL 17.034480 17.034480 0 H(-1)2H(3)C(1)'
mod_dict['Methyl_2H(3)[R]'] = 'R NORMAL 17.034480 17.034480 0 H(-1)2H(3)C(1)'
mod_dict['Methyl_2H(3)13C(1)[R]'] = 'R NORMAL 18.037835 18.037835 0 H(-1)2H(3)13C(1)'
mod_dict['Methylamine[S]'] = 'S NORMAL 13.031634 13.031634 0 H(3)C(1)N(1)O(-1)'
mod_dict['Methylamine[T]'] = 'T NORMAL 13.031634 13.031634 0 H(3)C(1)N(1)O(-1)'
mod_dict['Methylmalonylation[S]'] = 'S NORMAL 100.016044 100.016044 0 H(4)C(4)O(3)'
mod_dict['Methylphosphonate[S]'] = 'S NORMAL 77.987066 77.987066 0 H(3)C(1)O(2)P(1)'
mod_dict['Methylphosphonate[T]'] = 'T NORMAL 77.987066 77.987066 0 H(3)C(1)O(2)P(1)'
mod_dict['Methylphosphonate[Y]'] = 'Y NORMAL 77.987066 77.987066 0 H(3)C(1)O(2)P(1)'
mod_dict['Methylpyrroline[K]'] = 'K NORMAL 109.052764 109.052764 0 H(7)C(6)N(1)O(1)'
mod_dict['Methylthio[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Methylthio[C]'] = 'C NORMAL 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Methylthio[D]'] = 'D NORMAL 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Methylthio[K]'] = 'K NORMAL 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Methylthio[N]'] = 'N NORMAL 45.987721 45.987721 0 H(2)C(1)S(1)'
mod_dict['Microcin[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 831.197041 831.197041 0 H(37)C(36)N(3)O(20)'
mod_dict['MicrocinC7[ProteinC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_C 386.110369 386.110369 0 H(19)C(13)N(6)O(6)P(1)'
mod_dict['Molybdopterin[C]'] = 'C NORMAL 521.884073 521.884073 0 H(11)C(10)N(5)O(8)P(1)S(2)Mo(1)'
mod_dict['MolybdopterinGD+Delta_S(-1)Se(1)[C]'] = 'C NORMAL 1620.930224 1620.930224 0 H(47)C(40)N(20)O(26)P(4)S(3)Se(1)Mo(1)'
mod_dict['MolybdopterinGD[C]'] = 'C NORMAL 1572.985775 1572.985775 0 H(47)C(40)N(20)O(26)P(4)S(4)Mo(1)'
mod_dict['MolybdopterinGD[D]'] = 'D NORMAL 1572.985775 1572.985775 0 H(47)C(40)N(20)O(26)P(4)S(4)Mo(1)'
mod_dict['MurNAc[A]'] = 'A NORMAL 275.100502 275.100502 0 H(17)C(11)N(1)O(7)'
mod_dict['Myristoleyl[ProteinN-termG]'] = 'G PRO_N 208.182715 208.182715 0 H(24)C(14)O(1)'
mod_dict['Myristoyl+Delta_H(-4)[ProteinN-termG]'] = 'G PRO_N 206.167065 206.167065 0 H(22)C(14)O(1)'
mod_dict['Myristoyl[AnyN-termG]'] = 'G PEP_N 210.198366 210.198366 0 H(26)C(14)O(1)'
mod_dict['Myristoyl[C]'] = 'C NORMAL 210.198366 210.198366 0 H(26)C(14)O(1)'
mod_dict['Myristoyl[K]'] = 'K NORMAL 210.198366 210.198366 0 H(26)C(14)O(1)'
mod_dict['N-dimethylphosphate[S]'] = 'S NORMAL 107.013615 107.013615 0 H(6)C(2)N(1)O(2)P(1)'
mod_dict['NA-LNO2[C]'] = 'C NORMAL 325.225309 325.225309 0 H(31)C(18)N(1)O(4)'
mod_dict['NA-LNO2[H]'] = 'H NORMAL 325.225309 325.225309 0 H(31)C(18)N(1)O(4)'
mod_dict['NA-OA-NO2[C]'] = 'C NORMAL 327.240959 327.240959 0 H(33)C(18)N(1)O(4)'
mod_dict['NA-OA-NO2[H]'] = 'H NORMAL 327.240959 327.240959 0 H(33)C(18)N(1)O(4)'
mod_dict['NBS[W]'] = 'W NORMAL 152.988449 152.988449 0 H(3)C(6)N(1)O(2)S(1)'
mod_dict['NBS_13C(6)[W]'] = 'W NORMAL 159.008578 159.008578 0 H(3)13C(6)N(1)O(2)S(1)'
mod_dict['NDA[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 175.042199 175.042199 0 H(5)C(13)N(1)'
mod_dict['NDA[K]'] = 'K NORMAL 175.042199 175.042199 0 H(5)C(13)N(1)'
mod_dict['NEIAA[C]'] = 'C NORMAL 85.052764 85.052764 0 H(7)C(4)N(1)O(1)'
mod_dict['NEIAA[Y]'] = 'Y NORMAL 85.052764 85.052764 0 H(7)C(4)N(1)O(1)'
mod_dict['NEIAA_2H(5)[C]'] = 'C NORMAL 90.084148 90.084148 0 H(2)2H(5)C(4)N(1)O(1)'
mod_dict['NEIAA_2H(5)[Y]'] = 'Y NORMAL 90.084148 90.084148 0 H(2)2H(5)C(4)N(1)O(1)'
mod_dict['NEM_2H(5)+H2O[C]'] = 'C NORMAL 148.089627 148.089627 0 H(4)2H(5)C(6)N(1)O(3)'
mod_dict['NEM_2H(5)[C]'] = 'C NORMAL 130.079062 130.079062 0 H(2)2H(5)C(6)N(1)O(2)'
mod_dict['NEMsulfur[C]'] = 'C NORMAL 157.019749 157.019749 0 H(7)C(6)N(1)O(2)S(1)'
mod_dict['NEMsulfurWater[C]'] = 'C NORMAL 175.030314 175.030314 0 H(9)C(6)N(1)O(3)S(1)'
mod_dict['NHS-LC-Biotin[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 339.161662 339.161662 0 H(25)C(16)N(3)O(3)S(1)'
mod_dict['NHS-LC-Biotin[K]'] = 'K NORMAL 339.161662 339.161662 0 H(25)C(16)N(3)O(3)S(1)'
mod_dict['NHS-fluorescein[K]'] = 'K NORMAL 471.131802 471.131802 0 H(21)C(27)N(1)O(7)'
mod_dict['NIC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 105.021464 105.021464 0 H(3)C(6)N(1)O(1)'
mod_dict['NIPCAM[C]'] = 'C NORMAL 99.068414 99.068414 0 H(9)C(5)N(1)O(1)'
mod_dict['NO_SMX_SEMD[C]'] = 'C NORMAL 252.044287 252.044287 0 H(10)C(10)N(3)O(3)S(1)'
mod_dict['NO_SMX_SIMD[C]'] = 'C NORMAL 267.031377 267.031377 0 H(9)C(10)N(3)O(4)S(1)'
mod_dict['NO_SMX_SMCT[C]'] = 'C NORMAL 268.039202 268.039202 0 H(10)C(10)N(3)O(4)S(1)'
mod_dict['Nethylmaleimide+water[C]'] = 'C NORMAL 143.058243 143.058243 0 H(9)C(6)N(1)O(3)'
mod_dict['Nethylmaleimide+water[K]'] = 'K NORMAL 143.058243 143.058243 0 H(9)C(6)N(1)O(3)'
mod_dict['Nethylmaleimide[C]'] = 'C NORMAL 125.047679 125.047679 0 H(7)C(6)N(1)O(2)'
mod_dict['NeuAc[N]'] = 'N NORMAL 291.095417 291.095417 0 NeuAc(1)'
mod_dict['NeuAc[S]'] = 'S NORMAL 291.095417 291.095417 0 NeuAc(1)'
mod_dict['NeuAc[T]'] = 'T NORMAL 291.095417 291.095417 0 NeuAc(1)'
mod_dict['NeuGc[N]'] = 'N NORMAL 307.090331 307.090331 0 NeuGc(1)'
mod_dict['NeuGc[S]'] = 'S NORMAL 307.090331 307.090331 0 NeuGc(1)'
mod_dict['NeuGc[T]'] = 'T NORMAL 307.090331 307.090331 0 NeuGc(1)'
mod_dict['Nitro[W]'] = 'W NORMAL 44.985078 44.985078 0 H(-1)N(1)O(2)'
mod_dict['Nitro[Y]'] = 'Y NORMAL 44.985078 44.985078 0 H(-1)N(1)O(2)'
mod_dict['Nitrosyl[C]'] = 'C NORMAL 28.990164 28.990164 0 H(-1)N(1)O(1)'
mod_dict['Nmethylmaleimide+water[C]'] = 'C NORMAL 129.042593 129.042593 0 H(7)C(5)N(1)O(3)'
mod_dict['Nmethylmaleimide[C]'] = 'C NORMAL 111.032028 111.032028 0 H(5)C(5)N(1)O(2)'
mod_dict['Nmethylmaleimide[K]'] = 'K NORMAL 111.032028 111.032028 0 H(5)C(5)N(1)O(2)'
mod_dict['O-Dimethylphosphate[S]'] = 'S NORMAL 107.997631 107.997631 0 H(5)C(2)O(3)P(1)'
mod_dict['O-Dimethylphosphate[T]'] = 'T NORMAL 107.997631 107.997631 0 H(5)C(2)O(3)P(1)'
mod_dict['O-Dimethylphosphate[Y]'] = 'Y NORMAL 107.997631 107.997631 0 H(5)C(2)O(3)P(1)'
mod_dict['O-Et-N-diMePhospho[S]'] = 'S NORMAL 135.044916 135.044916 0 H(10)C(4)N(1)O(2)P(1)'
mod_dict['O-Isopropylmethylphosphonate[S]'] = 'S NORMAL 120.034017 120.034017 0 H(9)C(4)O(2)P(1)'
mod_dict['O-Isopropylmethylphosphonate[T]'] = 'T NORMAL 120.034017 120.034017 0 H(9)C(4)O(2)P(1)'
mod_dict['O-Isopropylmethylphosphonate[Y]'] = 'Y NORMAL 120.034017 120.034017 0 H(9)C(4)O(2)P(1)'
mod_dict['O-Methylphosphate[S]'] = 'S NORMAL 93.981981 93.981981 0 H(3)C(1)O(3)P(1)'
mod_dict['O-Methylphosphate[T]'] = 'T NORMAL 93.981981 93.981981 0 H(3)C(1)O(3)P(1)'
mod_dict['O-Methylphosphate[Y]'] = 'Y NORMAL 93.981981 93.981981 0 H(3)C(1)O(3)P(1)'
mod_dict['O-pinacolylmethylphosphonate[H]'] = 'H NORMAL 162.080967 162.080967 0 H(15)C(7)O(2)P(1)'
mod_dict['O-pinacolylmethylphosphonate[K]'] = 'K NORMAL 162.080967 162.080967 0 H(15)C(7)O(2)P(1)'
mod_dict['O-pinacolylmethylphosphonate[S]'] = 'S NORMAL 162.080967 162.080967 0 H(15)C(7)O(2)P(1)'
mod_dict['O-pinacolylmethylphosphonate[T]'] = 'T NORMAL 162.080967 162.080967 0 H(15)C(7)O(2)P(1)'
mod_dict['O-pinacolylmethylphosphonate[Y]'] = 'Y NORMAL 162.080967 162.080967 0 H(15)C(7)O(2)P(1)'
mod_dict['Octanoyl[C]'] = 'C NORMAL 126.104465 126.104465 0 H(14)C(8)O(1)'
mod_dict['Octanoyl[S]'] = 'S NORMAL 126.104465 126.104465 0 H(14)C(8)O(1)'
mod_dict['Octanoyl[T]'] = 'T NORMAL 126.104465 126.104465 0 H(14)C(8)O(1)'
mod_dict['OxArgBiotin[R]'] = 'R NORMAL 310.135113 310.135113 0 H(22)C(15)N(2)O(3)S(1)'
mod_dict['OxArgBiotinRed[R]'] = 'R NORMAL 312.150763 312.150763 0 H(24)C(15)N(2)O(3)S(1)'
mod_dict['OxLysBiotin[K]'] = 'K NORMAL 352.156911 352.156911 0 H(24)C(16)N(4)O(3)S(1)'
mod_dict['OxLysBiotinRed[K]'] = 'K NORMAL 354.172562 354.172562 0 H(26)C(16)N(4)O(3)S(1)'
mod_dict['OxProBiotin[P]'] = 'P NORMAL 369.183461 369.183461 0 H(27)C(16)N(5)O(3)S(1)'
mod_dict['OxProBiotinRed[P]'] = 'P NORMAL 371.199111 371.199111 0 H(29)C(16)N(5)O(3)S(1)'
mod_dict['Oxidation+NEM[C]'] = 'C NORMAL 141.042593 141.042593 0 H(7)C(6)N(1)O(3)'
mod_dict['Oxidation[AnyC-termG]'] = 'G PEP_C 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[C]'] = 'C NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[D]'] = 'D NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[F]'] = 'F NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[H]'] = 'H NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[K]'] = 'K NORMAL 15.994915 15.994915 0 O(1)'
# Oxidation[M] carries one neutral loss: 63.998285 Da = CH4OS (methanesulfenic acid).
mod_dict['Oxidation[M]'] = 'M NORMAL 15.994915 15.994915 1 63.998285 63.998285 O(1)'
mod_dict['Oxidation[N]'] = 'N NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[P]'] = 'P NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[R]'] = 'R NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[W]'] = 'W NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['Oxidation[Y]'] = 'Y NORMAL 15.994915 15.994915 0 O(1)'
mod_dict['PEITC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 163.045570 163.045570 0 H(9)C(9)N(1)S(1)'
mod_dict['PEITC[C]'] = 'C NORMAL 163.045570 163.045570 0 H(9)C(9)N(1)S(1)'
mod_dict['PEITC[K]'] = 'K NORMAL 163.045570 163.045570 0 H(9)C(9)N(1)S(1)'
mod_dict['PEO-Iodoacetyl-LC-Biotin[C]'] = 'C NORMAL 414.193691 414.193691 0 H(30)C(18)N(4)O(5)S(1)'
mod_dict['PET[S]'] = 'S NORMAL 121.035005 121.035005 0 H(7)C(7)N(1)O(-1)S(1)'
mod_dict['PET[T]'] = 'T NORMAL 121.035005 121.035005 0 H(7)C(7)N(1)O(-1)S(1)'
mod_dict['PS_Hapten[C]'] = 'C NORMAL 120.021129 120.021129 0 H(4)C(7)O(2)'
mod_dict['PS_Hapten[H]'] = 'H NORMAL 120.021129 120.021129 0 H(4)C(7)O(2)'
mod_dict['PS_Hapten[K]'] = 'K NORMAL 120.021129 120.021129 0 H(4)C(7)O(2)'
mod_dict['Palmitoleyl[C]'] = 'C NORMAL 236.214016 236.214016 0 H(28)C(16)O(1)'
mod_dict['Palmitoleyl[S]'] = 'S NORMAL 236.214016 236.214016 0 H(28)C(16)O(1)'
mod_dict['Palmitoleyl[T]'] = 'T NORMAL 236.214016 236.214016 0 H(28)C(16)O(1)'
mod_dict['Palmitoyl[C]'] = 'C NORMAL 238.229666 238.229666 0 H(30)C(16)O(1)'
mod_dict['Palmitoyl[K]'] = 'K NORMAL 238.229666 238.229666 0 H(30)C(16)O(1)'
mod_dict['Palmitoyl[S]'] = 'S NORMAL 238.229666 238.229666 0 H(30)C(16)O(1)'
mod_dict['Palmitoyl[T]'] = 'T NORMAL 238.229666 238.229666 0 H(30)C(16)O(1)'
mod_dict['Palmitoyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 238.229666 238.229666 0 H(30)C(16)O(1)'
mod_dict['Pentylamine[Q]'] = 'Q NORMAL 85.089149 85.089149 0 H(11)C(5)N(1)'
# "X->Y[X]" entries are amino-acid substitutions; masses are deltas and may be
# negative, with negative element counts in the composition.
mod_dict['Phe->Ala[F]'] = 'F NORMAL -76.031300 -76.031300 0 H(-4)C(-6)'
mod_dict['Phe->Arg[F]'] = 'F NORMAL 9.032697 9.032697 0 H(3)C(-3)N(3)'
mod_dict['Phe->Asn[F]'] = 'F NORMAL -33.025486 -33.025486 0 H(-3)C(-5)N(1)O(1)'
mod_dict['Phe->Asp[F]'] = 'F NORMAL -32.041471 -32.041471 0 H(-4)C(-5)O(2)'
mod_dict['Phe->CamCys[F]'] = 'F NORMAL 12.962234 12.962234 0 H(-1)C(-4)N(1)O(1)S(1)'
mod_dict['Phe->Cys[F]'] = 'F NORMAL -44.059229 -44.059229 0 H(-4)C(-6)S(1)'
mod_dict['Phe->Gln[F]'] = 'F NORMAL -19.009836 -19.009836 0 H(-1)C(-4)N(1)O(1)'
mod_dict['Phe->Glu[F]'] = 'F NORMAL -18.025821 -18.025821 0 H(-2)C(-4)O(2)'
mod_dict['Phe->Gly[F]'] = 'F NORMAL -90.046950 -90.046950 0 H(-6)C(-7)'
mod_dict['Phe->His[F]'] = 'F NORMAL -10.009502 -10.009502 0 H(-2)C(-3)N(2)'
mod_dict['Phe->Lys[F]'] = 'F NORMAL -18.973451 -18.973451 0 H(3)C(-3)N(1)'
mod_dict['Phe->Met[F]'] = 'F NORMAL -16.027929 -16.027929 0 C(-4)S(1)'
mod_dict['Phe->Pro[F]'] = 'F NORMAL -50.015650 -50.015650 0 H(-2)C(-4)'
mod_dict['Phe->Ser[F]'] = 'F NORMAL -60.036386 -60.036386 0 H(-4)C(-6)O(1)'
mod_dict['Phe->Thr[F]'] = 'F NORMAL -46.020735 -46.020735 0 H(-2)C(-5)O(1)'
mod_dict['Phe->Trp[F]'] = 'F NORMAL 39.010899 39.010899 0 H(1)C(2)N(1)'
mod_dict['Phe->Val[F]'] = 'F NORMAL -48.000000 -48.000000 0 C(-4)'
mod_dict['Phe->Xle[F]'] = 'F NORMAL -33.984350 -33.984350 0 H(2)C(-3)'
mod_dict['Phenylisocyanate_2H(5)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 124.068498 124.068498 0 2H(5)C(7)N(1)O(1)'
mod_dict['Phospho[C]'] = 'C NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
mod_dict['Phospho[D]'] = 'D NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
mod_dict['Phospho[H]'] = 'H NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
mod_dict['Phospho[K]'] = 'K NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
mod_dict['Phospho[R]'] = 'R NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
# Phospho on S/T has a 97.976896 Da neutral loss (H3PO4); Phospho[Y] does not.
mod_dict['Phospho[S]'] = 'S NORMAL 79.966331 79.966331 1 97.976896 97.976896 H(1)O(3)P(1)'
mod_dict['Phospho[T]'] = 'T NORMAL 79.966331 79.966331 1 97.976896 97.976896 H(1)O(3)P(1)'
mod_dict['Phospho[Y]'] = 'Y NORMAL 79.966331 79.966331 0 H(1)O(3)P(1)'
mod_dict['PhosphoHex[S]'] = 'S NORMAL 242.019154 242.019154 0 H(1)O(3)P(1)Hex(1)'
mod_dict['PhosphoHexNAc[S]'] = 'S NORMAL 283.045704 283.045704 0 H(1)O(3)P(1)HexNAc(1)'
mod_dict['PhosphoHexNAc[T]'] = 'T NORMAL 283.045704 283.045704 0 H(1)O(3)P(1)HexNAc(1)'
mod_dict['PhosphoUridine[H]'] = 'H NORMAL 306.025302 306.025302 0 H(11)C(9)N(2)O(8)P(1)'
mod_dict['PhosphoUridine[Y]'] = 'Y NORMAL 306.025302 306.025302 0 H(11)C(9)N(2)O(8)P(1)'
mod_dict['Phosphoadenosine[H]'] = 'H NORMAL 329.052520 329.052520 0 H(12)C(10)N(5)O(6)P(1)'
mod_dict['Phosphoadenosine[K]'] = 'K NORMAL 329.052520 329.052520 0 H(12)C(10)N(5)O(6)P(1)'
mod_dict['Phosphoadenosine[T]'] = 'T NORMAL 329.052520 329.052520 0 H(12)C(10)N(5)O(6)P(1)'
mod_dict['Phosphoadenosine[Y]'] = 'Y NORMAL 329.052520 329.052520 0 H(12)C(10)N(5)O(6)P(1)'
mod_dict['Phosphogluconoylation[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 258.014069 258.014069 0 H(11)C(6)O(9)P(1)'
mod_dict['Phosphogluconoylation[K]'] = 'K NORMAL 258.014069 258.014069 0 H(11)C(6)O(9)P(1)'
mod_dict['Phosphoguanosine[H]'] = 'H NORMAL 345.047435 345.047435 0 H(12)C(10)N(5)O(7)P(1)'
mod_dict['Phosphoguanosine[K]'] = 'K NORMAL 345.047435 345.047435 0 H(12)C(10)N(5)O(7)P(1)'
mod_dict['Phosphopantetheine[S]'] = 'S NORMAL 340.085794 340.085794 0 H(21)C(11)N(2)O(6)P(1)S(1)'
mod_dict['Phosphopropargyl[S]'] = 'S NORMAL 116.997965 116.997965 0 H(4)C(3)N(1)O(2)P(1)'
mod_dict['Phosphopropargyl[T]'] = 'T NORMAL 116.997965 116.997965 0 H(4)C(3)N(1)O(2)P(1)'
mod_dict['Phosphopropargyl[Y]'] = 'Y NORMAL 116.997965 116.997965 0 H(4)C(3)N(1)O(2)P(1)'
mod_dict['PhosphoribosyldephosphoCoA[S]'] = 'S NORMAL 881.146904 881.146904 0 H(42)C(26)N(7)O(19)P(3)S(1)'
mod_dict['Phycocyanobilin[C]'] = 'C NORMAL 586.279135 586.279135 0 H(38)C(33)N(4)O(6)'
mod_dict['Phycoerythrobilin[C]'] = 'C NORMAL 588.294785 588.294785 0 H(40)C(33)N(4)O(6)'
mod_dict['Phytochromobilin[C]'] = 'C NORMAL 584.263485 584.263485 0 H(36)C(33)N(4)O(6)'
mod_dict['Piperidine[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 68.062600 68.062600 0 H(8)C(5)'
mod_dict['Piperidine[K]'] = 'K NORMAL 68.062600 68.062600 0 H(8)C(5)'
mod_dict['Pro->Ala[P]'] = 'P NORMAL -26.015650 -26.015650 0 H(-2)C(-2)'
mod_dict['Pro->Arg[P]'] = 'P NORMAL 59.048347 59.048347 0 H(5)C(1)N(3)'
mod_dict['Pro->Asn[P]'] = 'P NORMAL 16.990164 16.990164 0 H(-1)C(-1)N(1)O(1)'
mod_dict['Pro->Asp[P]'] = 'P NORMAL 17.974179 17.974179 0 H(-2)C(-1)O(2)'
mod_dict['Pro->Cys[P]'] = 'P NORMAL 5.956421 5.956421 0 H(-2)C(-2)S(1)'
mod_dict['Pro->Gln[P]'] = 'P NORMAL 31.005814 31.005814 0 H(1)N(1)O(1)'
mod_dict['Pro->Gly[P]'] = 'P NORMAL -40.031300 -40.031300 0 H(-4)C(-3)'
mod_dict['Pro->His[P]'] = 'P NORMAL 40.006148 40.006148 0 C(1)N(2)'
mod_dict['Pro->Lys[P]'] = 'P NORMAL 31.042199 31.042199 0 H(5)C(1)N(1)'
mod_dict['Pro->Met[P]'] = 'P NORMAL 33.987721 33.987721 0 H(2)S(1)'
mod_dict['Pro->Phe[P]'] = 'P NORMAL 50.015650 50.015650 0 H(2)C(4)'
mod_dict['Pro->Pyrrolidinone[P]'] = 'P NORMAL -30.010565 -30.010565 0 H(-2)C(-1)O(-1)'
mod_dict['Pro->Pyrrolidone[P]'] = 'P NORMAL -27.994915 -27.994915 0 C(-1)O(-1)'
mod_dict['Pro->Ser[P]'] = 'P NORMAL -10.020735 -10.020735 0 H(-2)C(-2)O(1)'
mod_dict['Pro->Thr[P]'] = 'P NORMAL 3.994915 3.994915 0 C(-1)O(1)'
mod_dict['Pro->Trp[P]'] = 'P NORMAL 89.026549 89.026549 0 H(3)C(6)N(1)'
mod_dict['Pro->Tyr[P]'] = 'P NORMAL 66.010565 66.010565 0 H(2)C(4)O(1)'
mod_dict['Pro->Val[P]'] = 'P NORMAL 2.015650 2.015650 0 H(2)'
mod_dict['Pro->Xle[P]'] = 'P NORMAL 16.031300 16.031300 0 H(4)C(1)'
mod_dict['Pro->pyro-Glu[P]'] = 'P NORMAL 13.979265 13.979265 0 H(-2)O(1)'
mod_dict['Propargylamine[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 37.031634 37.031634 0 H(3)C(3)N(1)O(-1)'
mod_dict['Propargylamine[D]'] = 'D NORMAL 37.031634 37.031634 0 H(3)C(3)N(1)O(-1)'
mod_dict['Propargylamine[E]'] = 'E NORMAL 37.031634 37.031634 0 H(3)C(3)N(1)O(-1)'
mod_dict['Propionamide[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 71.037114 71.037114 0 H(5)C(3)N(1)O(1)'
mod_dict['Propionamide[C]'] = 'C NORMAL 71.037114 71.037114 0 H(5)C(3)N(1)O(1)'
mod_dict['Propionamide[K]'] = 'K NORMAL 71.037114 71.037114 0 H(5)C(3)N(1)O(1)'
mod_dict['Propionamide_2H(3)[C]'] = 'C NORMAL 74.055944 74.055944 0 H(2)2H(3)C(3)N(1)O(1)'
mod_dict['Propionyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Propionyl[K]'] = 'K NORMAL 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Propionyl[S]'] = 'S NORMAL 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Propionyl[T]'] = 'T NORMAL 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Propionyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 56.026215 56.026215 0 H(4)C(3)O(1)'
mod_dict['Propionyl_13C(3)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 59.036279 59.036279 0 H(4)13C(3)O(1)'
mod_dict['Propionyl_13C(3)[K]'] = 'K NORMAL 59.036279 59.036279 0 H(4)13C(3)O(1)'
mod_dict['Propiophenone[C]'] = 'C NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[H]'] = 'H NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[K]'] = 'K NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[R]'] = 'R NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[S]'] = 'S NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[T]'] = 'T NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propiophenone[W]'] = 'W NORMAL 132.057515 132.057515 0 H(8)C(9)O(1)'
mod_dict['Propyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Propyl[K]'] = 'K NORMAL 42.046950 42.046950 0 H(6)C(3)'
mod_dict['PropylNAGthiazoline[C]'] = 'C NORMAL 232.064354 232.064354 0 H(14)C(9)N(1)O(4)S(1)'
mod_dict['Propyl_2H(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 48.084611 48.084611 0 2H(6)C(3)'
mod_dict['Propyl_2H(6)[K]'] = 'K NORMAL 48.084611 48.084611 0 2H(6)C(3)'
mod_dict['Puromycin[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 453.212452 453.212452 0 H(27)C(22)N(7)O(4)'
mod_dict['PyMIC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 134.048013 134.048013 0 H(6)C(7)N(2)O(1)'
mod_dict['PyridoxalPhosphate[K]'] = 'K NORMAL 229.014009 229.014009 0 H(8)C(8)N(1)O(5)P(1)'
mod_dict['PyridoxalPhosphateH2[K]'] = 'K NORMAL 231.029660 231.029660 0 H(10)C(8)N(1)O(5)P(1)'
mod_dict['Pyridylacetyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 119.037114 119.037114 0 H(5)C(7)N(1)O(1)'
mod_dict['Pyridylacetyl[K]'] = 'K NORMAL 119.037114 119.037114 0 H(5)C(7)N(1)O(1)'
mod_dict['Pyridylethyl[C]'] = 'C NORMAL 105.057849 105.057849 0 H(7)C(7)N(1)'
mod_dict['Pyro-carbamidomethyl[AnyN-termC]'] = 'C PEP_N 39.994915 39.994915 0 C(2)O(1)'
mod_dict['PyruvicAcidIminyl[K]'] = 'K NORMAL 70.005479 70.005479 0 H(2)C(3)O(2)'
mod_dict['PyruvicAcidIminyl[ProteinN-termC]'] = 'C PRO_N 70.005479 70.005479 0 H(2)C(3)O(2)'
mod_dict['PyruvicAcidIminyl[ProteinN-termV]'] = 'V PRO_N 70.005479 70.005479 0 H(2)C(3)O(2)'
mod_dict['QAT[C]'] = 'C NORMAL 171.149738 171.149738 0 H(19)C(9)N(2)O(1)'
mod_dict['QAT_2H(3)[C]'] = 'C NORMAL 174.168569 174.168569 0 H(16)2H(3)C(9)N(2)O(1)'
mod_dict['QEQTGG[K]'] = 'K NORMAL 600.250354 600.250354 0 H(36)C(23)N(8)O(11)'
mod_dict['QQQTGG[K]'] = 'K NORMAL 599.266339 599.266339 0 H(37)C(23)N(9)O(10)'
mod_dict['QTGG[K]'] = 'K NORMAL 343.149184 343.149184 0 H(21)C(13)N(5)O(6)'
mod_dict['Quinone[W]'] = 'W NORMAL 29.974179 29.974179 0 H(-2)O(2)'
mod_dict['Quinone[Y]'] = 'Y NORMAL 29.974179 29.974179 0 H(-2)O(2)'
mod_dict['Retinylidene[K]'] = 'K NORMAL 266.203451 266.203451 0 H(26)C(20)'
mod_dict['SMA[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 127.063329 127.063329 0 H(9)C(6)N(1)O(2)'
mod_dict['SMA[K]'] = 'K NORMAL 127.063329 127.063329 0 H(9)C(6)N(1)O(2)'
mod_dict['SMCC-maleimide[C]'] = 'C NORMAL 321.205242 321.205242 0 H(27)C(17)N(3)O(3)'
mod_dict['SPITC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 214.971084 214.971084 0 H(5)C(7)N(1)O(3)S(2)'
mod_dict['SPITC[K]'] = 'K NORMAL 214.971084 214.971084 0 H(5)C(7)N(1)O(3)S(2)'
mod_dict['SPITC_13C(6)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 220.991213 220.991213 0 H(5)C(1)13C(6)N(1)O(3)S(2)'
mod_dict['SPITC_13C(6)[K]'] = 'K NORMAL 220.991213 220.991213 0 H(5)C(1)13C(6)N(1)O(3)S(2)'
mod_dict['SUMO2135[K]'] = 'K NORMAL 2135.920496 2135.920496 0 H(137)C(90)N(21)O(37)S(1)'
mod_dict['SUMO3549[K]'] = 'K NORMAL 3549.536568 3549.536568 0 H(224)C(150)N(38)O(60)S(1)'
mod_dict['Saligenin[H]'] = 'H NORMAL 106.041865 106.041865 0 H(6)C(7)O(1)'
mod_dict['Saligenin[K]'] = 'K NORMAL 106.041865 106.041865 0 H(6)C(7)O(1)'
mod_dict['SecCarbamidomethyl[C]'] = 'C NORMAL 104.965913 104.965913 0 H(3)C(2)N(1)O(1)S(-1)Se(1)'
mod_dict['SecNEM[C]'] = 'C NORMAL 172.992127 172.992127 0 H(7)C(6)N(1)O(2)S(-1)Se(1)'
mod_dict['SecNEM_2H(5)[C]'] = 'C NORMAL 178.023511 178.023511 0 H(2)2H(5)C(6)N(1)O(2)S(-1)Se(1)'
mod_dict['Ser->Arg[S]'] = 'S NORMAL 69.069083 69.069083 0 H(7)C(3)N(3)O(-1)'
mod_dict['Ser->Asn[S]'] = 'S NORMAL 27.010899 27.010899 0 H(1)C(1)N(1)'
mod_dict['Ser->Cys[S]'] = 'S NORMAL 15.977156 15.977156 0 O(-1)S(1)'
mod_dict['Ser->Gln[S]'] = 'S NORMAL 41.026549 41.026549 0 H(3)C(2)N(1)'
mod_dict['Ser->Gly[S]'] = 'S NORMAL -30.010565 -30.010565 0 H(-2)C(-1)O(-1)'
mod_dict['Ser->His[S]'] = 'S NORMAL 50.026883 50.026883 0 H(2)C(3)N(2)O(-1)'
mod_dict['Ser->LacticAcid[ProteinN-termS]'] = 'S PRO_N -15.010899 -15.010899 0 H(-1)N(-1)'
mod_dict['Ser->Lys[S]'] = 'S NORMAL 41.062935 41.062935 0 H(7)C(3)N(1)O(-1)'
mod_dict['Ser->Phe[S]'] = 'S NORMAL 60.036386 60.036386 0 H(4)C(6)O(-1)'
mod_dict['Ser->Pro[S]'] = 'S NORMAL 10.020735 10.020735 0 H(2)C(2)O(-1)'
mod_dict['Ser->Trp[S]'] = 'S NORMAL 99.047285 99.047285 0 H(5)C(8)N(1)O(-1)'
mod_dict['Ser->Tyr[S]'] = 'S NORMAL 76.031300 76.031300 0 H(4)C(6)'
mod_dict['Ser->Val[S]'] = 'S NORMAL 12.036386 12.036386 0 H(4)C(2)O(-1)'
mod_dict['Ser->Xle[S]'] = 'S NORMAL 26.052036 26.052036 0 H(6)C(3)O(-1)'
mod_dict['Succinyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 100.016044 100.016044 0 H(4)C(4)O(3)'
mod_dict['Succinyl[K]'] = 'K NORMAL 100.016044 100.016044 0 H(4)C(4)O(3)'
mod_dict['Succinyl[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 100.016044 100.016044 0 H(4)C(4)O(3)'
mod_dict['Succinyl_13C(4)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 104.029463 104.029463 0 H(4)13C(4)O(3)'
mod_dict['Succinyl_13C(4)[K]'] = 'K NORMAL 104.029463 104.029463 0 H(4)13C(4)O(3)'
mod_dict['Succinyl_2H(4)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 104.041151 104.041151 0 2H(4)C(4)O(3)'
mod_dict['Succinyl_2H(4)[K]'] = 'K NORMAL 104.041151 104.041151 0 2H(4)C(4)O(3)'
mod_dict['SulfanilicAcid[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 155.004099 155.004099 0 H(5)C(6)N(1)O(2)S(1)'
mod_dict['SulfanilicAcid[D]'] = 'D NORMAL 155.004099 155.004099 0 H(5)C(6)N(1)O(2)S(1)'
mod_dict['SulfanilicAcid[E]'] = 'E NORMAL 155.004099 155.004099 0 H(5)C(6)N(1)O(2)S(1)'
mod_dict['SulfanilicAcid_13C(6)[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C 161.024228 161.024228 0 H(5)13C(6)N(1)O(2)S(1)'
mod_dict['SulfanilicAcid_13C(6)[D]'] = 'D NORMAL 161.024228 161.024228 0 H(5)13C(6)N(1)O(2)S(1)'
mod_dict['SulfanilicAcid_13C(6)[E]'] = 'E NORMAL 161.024228 161.024228 0 H(5)13C(6)N(1)O(2)S(1)'
mod_dict['Sulfide[C]'] = 'C NORMAL 31.972071 31.972071 0 S(1)'
mod_dict['Sulfide[D]'] = 'D NORMAL 31.972071 31.972071 0 S(1)'
mod_dict['Sulfide[W]'] = 'W NORMAL 31.972071 31.972071 0 S(1)'
mod_dict['Sulfo-NHS-LC-LC-Biotin[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 452.245726 452.245726 0 H(36)C(22)N(4)O(4)S(1)'
mod_dict['Sulfo-NHS-LC-LC-Biotin[K]'] = 'K NORMAL 452.245726 452.245726 0 H(36)C(22)N(4)O(4)S(1)'
mod_dict['Sulfo[C]'] = 'C NORMAL 79.956815 79.956815 0 O(3)S(1)'
# Sulfation on S/T/Y is fully labile: the neutral loss equals the full mod mass.
mod_dict['Sulfo[S]'] = 'S NORMAL 79.956815 79.956815 1 79.956815 79.956815 O(3)S(1)'
mod_dict['Sulfo[T]'] = 'T NORMAL 79.956815 79.956815 1 79.956815 79.956815 O(3)S(1)'
mod_dict['Sulfo[Y]'] = 'Y NORMAL 79.956815 79.956815 1 79.956815 79.956815 O(3)S(1)'
mod_dict['SulfoGMBS[C]'] = 'C NORMAL 458.162391 458.162391 0 H(26)C(22)N(4)O(5)S(1)'
mod_dict['SulfurDioxide[C]'] = 'C NORMAL 63.961900 63.961900 0 O(2)S(1)'
mod_dict['TAMRA-FP[S]'] = 'S NORMAL 659.312423 659.312423 0 H(46)C(37)N(3)O(6)P(1)'
mod_dict['TAMRA-FP[Y]'] = 'Y NORMAL 659.312423 659.312423 0 H(46)C(37)N(3)O(6)P(1)'
mod_dict['TMAB[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 128.107539 128.107539 1 59.073499 59.073499 H(14)C(7)N(1)O(1)'
mod_dict['TMAB[K]'] = 'K NORMAL 128.107539 128.107539 1 59.073499 59.073499 H(14)C(7)N(1)O(1)'
mod_dict['TMAB_2H(9)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 137.164030 137.164030 1 68.129990 68.129990 H(5)2H(9)C(7)N(1)O(1)'
mod_dict['TMAB_2H(9)[K]'] = 'K NORMAL 137.164030 137.164030 1 68.129990 68.129990 H(5)2H(9)C(7)N(1)O(1)'
mod_dict['TMPP-Ac[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 572.181134 572.181134 0 H(33)C(29)O(10)P(1)'
mod_dict['TMT[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT[H]'] = 'H NORMAL 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT[K]'] = 'K NORMAL 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT[S]'] = 'S NORMAL 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT[T]'] = 'T NORMAL 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 224.152478 224.152478 0 H(20)C(12)N(2)O(2)'
mod_dict['TMT2plex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT2plex[H]'] = 'H NORMAL 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT2plex[K]'] = 'K NORMAL 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT2plex[S]'] = 'S NORMAL 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT2plex[T]'] = 'T NORMAL 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT2plex[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 225.155833 225.155833 0 H(20)C(11)13C(1)N(2)O(2)'
mod_dict['TMT6plex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TMT6plex[H]'] = 'H NORMAL 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TMT6plex[K]'] = 'K NORMAL 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TMT6plex[S]'] = 'S NORMAL 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TMT6plex[T]'] = 'T NORMAL 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TMT6plex[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 229.162932 229.162932 0 H(20)C(8)13C(4)N(1)15N(1)O(2)'
mod_dict['TNBS[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 210.986535 210.986535 0 H(1)C(6)N(3)O(6)'
mod_dict['TNBS[K]'] = 'K NORMAL 210.986535 210.986535 0 H(1)C(6)N(3)O(6)'
mod_dict['Thiadiazole[C]'] = 'C NORMAL 174.025169 174.025169 0 H(6)C(9)N(2)S(1)'
mod_dict['Thiazolidine[AnyN-termC]'] = 'C PEP_N 12.000000 12.000000 0 C(1)'
mod_dict['Thioacyl[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 87.998285 87.998285 0 H(4)C(3)O(1)S(1)'
mod_dict['Thioacyl[K]'] = 'K NORMAL 87.998285 87.998285 0 H(4)C(3)O(1)S(1)'
mod_dict['Thiophos-S-S-biotin[S]'] = 'S NORMAL 525.142894 525.142894 1 525.142894 525.142894 H(34)C(19)N(4)O(5)P(1)S(3)'
mod_dict['Thiophos-S-S-biotin[T]'] = 'T NORMAL 525.142894 525.142894 1 525.142894 525.142894 H(34)C(19)N(4)O(5)P(1)S(3)'
mod_dict['Thiophos-S-S-biotin[Y]'] = 'Y NORMAL 525.142894 525.142894 1 525.142894 525.142894 H(34)C(19)N(4)O(5)P(1)S(3)'
mod_dict['Thiophospho[S]'] = 'S NORMAL 95.943487 95.943487 0 H(1)O(2)P(1)S(1)'
mod_dict['Thiophospho[T]'] = 'T NORMAL 95.943487 95.943487 0 H(1)O(2)P(1)S(1)'
mod_dict['Thiophospho[Y]'] = 'Y NORMAL 95.943487 95.943487 0 H(1)O(2)P(1)S(1)'
mod_dict['Thr->Ala[T]'] = 'T NORMAL -30.010565 -30.010565 0 H(-2)C(-1)O(-1)'
mod_dict['Thr->Arg[T]'] = 'T NORMAL 55.053433 55.053433 0 H(5)C(2)N(3)O(-1)'
mod_dict['Thr->Asn[T]'] = 'T NORMAL 12.995249 12.995249 0 H(-1)N(1)'
mod_dict['Thr->Asp[T]'] = 'T NORMAL 13.979265 13.979265 0 H(-2)O(1)'
mod_dict['Thr->Cys[T]'] = 'T NORMAL 1.961506 1.961506 0 H(-2)C(-1)O(-1)S(1)'
mod_dict['Thr->Gln[T]'] = 'T NORMAL 27.010899 27.010899 0 H(1)C(1)N(1)'
mod_dict['Thr->Gly[T]'] = 'T NORMAL -44.026215 -44.026215 0 H(-4)C(-2)O(-1)'
mod_dict['Thr->His[T]'] = 'T NORMAL 36.011233 36.011233 0 C(2)N(2)O(-1)'
mod_dict['Thr->Met[T]'] = 'T NORMAL 29.992806 29.992806 0 H(2)C(1)O(-1)S(1)'
mod_dict['Thr->Phe[T]'] = 'T NORMAL 46.020735 46.020735 0 H(2)C(5)O(-1)'
mod_dict['Thr->Pro[T]'] = 'T NORMAL -3.994915 -3.994915 0 C(1)O(-1)'
mod_dict['Thr->Ser[T]'] = 'T NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Thr->Trp[T]'] = 'T NORMAL 85.031634 85.031634 0 H(3)C(7)N(1)O(-1)'
mod_dict['Thr->Tyr[T]'] = 'T NORMAL 62.015650 62.015650 0 H(2)C(5)'
mod_dict['Thr->Val[T]'] = 'T NORMAL -1.979265 -1.979265 0 H(2)C(1)O(-1)'
mod_dict['Thr->Xle[T]'] = 'T NORMAL 12.036386 12.036386 0 H(4)C(2)O(-1)'
mod_dict['Thrbiotinhydrazide[T]'] = 'T NORMAL 240.104482 240.104482 0 H(16)C(10)N(4)O(1)S(1)'
mod_dict['Thyroxine[Y]'] = 'Y NORMAL 595.612807 595.612807 0 C(6)O(1)I(4)'
mod_dict['Triiodo[Y]'] = 'Y NORMAL 377.689944 377.689944 0 H(-3)I(3)'
mod_dict['Triiodothyronine[Y]'] = 'Y NORMAL 469.716159 469.716159 0 H(1)C(6)O(1)I(3)'
mod_dict['Trimethyl[R]'] = 'R NORMAL 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Trimethyl[ProteinN-termA]'] = 'A PRO_N 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Trimethyl_2H(9)[K]'] = 'K NORMAL 51.103441 51.103441 0 H(-3)2H(9)C(3)'
mod_dict['Trimethyl_2H(9)[R]'] = 'R NORMAL 51.103441 51.103441 0 H(-3)2H(9)C(3)'
mod_dict['Trioxidation[C]'] = 'C NORMAL 47.984744 47.984744 0 O(3)'
mod_dict['Trioxidation[W]'] = 'W NORMAL 47.984744 47.984744 0 O(3)'
mod_dict['Trioxidation[Y]'] = 'Y NORMAL 47.984744 47.984744 0 O(3)'
mod_dict['Tripalmitate[ProteinN-termC]'] = 'C PRO_N 788.725777 788.725777 0 H(96)C(51)O(5)'
mod_dict['Trp->Ala[W]'] = 'W NORMAL -115.042199 -115.042199 0 H(-5)C(-8)N(-1)'
mod_dict['Trp->Arg[W]'] = 'W NORMAL -29.978202 -29.978202 0 H(2)C(-5)N(2)'
mod_dict['Trp->Asn[W]'] = 'W NORMAL -72.036386 -72.036386 0 H(-4)C(-7)O(1)'
mod_dict['Trp->Asp[W]'] = 'W NORMAL -71.052370 -71.052370 0 H(-5)C(-7)N(-1)O(2)'
mod_dict['Trp->Cys[W]'] = 'W NORMAL -83.070128 -83.070128 0 H(-5)C(-8)N(-1)S(1)'
mod_dict['Trp->Gln[W]'] = 'W NORMAL -58.020735 -58.020735 0 H(-2)C(-6)O(1)'
mod_dict['Trp->Glu[W]'] = 'W NORMAL -57.036720 -57.036720 0 H(-3)C(-6)N(-1)O(2)'
mod_dict['Trp->Gly[W]'] = 'W NORMAL -129.057849 -129.057849 0 H(-7)C(-9)N(-1)'
mod_dict['Trp->His[W]'] = 'W NORMAL -49.020401 -49.020401 0 H(-3)C(-5)N(1)'
mod_dict['Trp->Hydroxykynurenin[W]'] = 'W NORMAL 19.989829 19.989829 0 C(-1)O(2)'
mod_dict['Trp->Kynurenin[W]'] = 'W NORMAL 3.994915 3.994915 0 C(-1)O(1)'
mod_dict['Trp->Lys[W]'] = 'W NORMAL -57.984350 -57.984350 0 H(2)C(-5)'
mod_dict['Trp->Met[W]'] = 'W NORMAL -55.038828 -55.038828 0 H(-1)C(-6)N(-1)S(1)'
mod_dict['Trp->Oxolactone[W]'] = 'W NORMAL 13.979265 13.979265 0 H(-2)O(1)'
mod_dict['Trp->Phe[W]'] = 'W NORMAL -39.010899 -39.010899 0 H(-1)C(-2)N(-1)'
mod_dict['Trp->Pro[W]'] = 'W NORMAL -89.026549 -89.026549 0 H(-3)C(-6)N(-1)'
mod_dict['Trp->Ser[W]'] = 'W NORMAL -99.047285 -99.047285 0 H(-5)C(-8)N(-1)O(1)'
mod_dict['Trp->Thr[W]'] = 'W NORMAL -85.031634 -85.031634 0 H(-3)C(-7)N(-1)O(1)'
mod_dict['Trp->Tyr[W]'] = 'W NORMAL -23.015984 -23.015984 0 H(-1)C(-2)N(-1)O(1)'
mod_dict['Trp->Val[W]'] = 'W NORMAL -87.010899 -87.010899 0 H(-1)C(-6)N(-1)'
mod_dict['Trp->Xle[W]'] = 'W NORMAL -72.995249 -72.995249 0 H(1)C(-5)N(-1)'
mod_dict['Tyr->Ala[Y]'] = 'Y NORMAL -92.026215 -92.026215 0 H(-4)C(-6)O(-1)'
mod_dict['Tyr->Arg[Y]'] = 'Y NORMAL -6.962218 -6.962218 0 H(3)C(-3)N(3)O(-1)'
mod_dict['Tyr->Asn[Y]'] = 'Y NORMAL -49.020401 -49.020401 0 H(-3)C(-5)N(1)'
mod_dict['Tyr->Asp[Y]'] = 'Y NORMAL -48.036386 -48.036386 0 H(-4)C(-5)O(1)'
mod_dict['Tyr->Cys[Y]'] = 'Y NORMAL -60.054144 -60.054144 0 H(-4)C(-6)O(-1)S(1)'
mod_dict['Tyr->Dha[Y]'] = 'Y NORMAL -94.041865 -94.041865 0 H(-6)C(-6)O(-1)'
mod_dict['Tyr->Gln[Y]'] = 'Y NORMAL -35.004751 -35.004751 0 H(-1)C(-4)N(1)'
mod_dict['Tyr->Glu[Y]'] = 'Y NORMAL -34.020735 -34.020735 0 H(-2)C(-4)O(1)'
mod_dict['Tyr->Gly[Y]'] = 'Y NORMAL -106.041865 -106.041865 0 H(-6)C(-7)O(-1)'
mod_dict['Tyr->His[Y]'] = 'Y NORMAL -26.004417 -26.004417 0 H(-2)C(-3)N(2)O(-1)'
mod_dict['Tyr->Lys[Y]'] = 'Y NORMAL -34.968366 -34.968366 0 H(3)C(-3)N(1)O(-1)'
mod_dict['Tyr->Met[Y]'] = 'Y NORMAL -32.022844 -32.022844 0 C(-4)O(-1)S(1)'
mod_dict['Tyr->Phe[Y]'] = 'Y NORMAL -15.994915 -15.994915 0 O(-1)'
mod_dict['Tyr->Pro[Y]'] = 'Y NORMAL -66.010565 -66.010565 0 H(-2)C(-4)O(-1)'
mod_dict['Tyr->Ser[Y]'] = 'Y NORMAL -76.031300 -76.031300 0 H(-4)C(-6)'
mod_dict['Tyr->Thr[Y]'] = 'Y NORMAL -62.015650 -62.015650 0 H(-2)C(-5)'
mod_dict['Tyr->Trp[Y]'] = 'Y NORMAL 23.015984 23.015984 0 H(1)C(2)N(1)O(-1)'
mod_dict['Tyr->Val[Y]'] = 'Y NORMAL -63.994915 -63.994915 0 C(-4)O(-1)'
mod_dict['Tyr->Xle[Y]'] = 'Y NORMAL -49.979265 -49.979265 0 H(2)C(-3)O(-1)'
mod_dict['Ub-Br2[C]'] = 'C NORMAL 100.063663 100.063663 0 H(8)C(4)N(2)O(1)'
mod_dict['Ub-VME[C]'] = 'C NORMAL 173.092617 173.092617 0 H(13)C(7)N(2)O(3)'
mod_dict['Ub-amide[C]'] = 'C NORMAL 196.108602 196.108602 0 H(14)C(9)N(3)O(2)'
mod_dict['Ub-fluorescein[C]'] = 'C NORMAL 597.209772 597.209772 0 H(29)C(31)N(6)O(7)'
mod_dict['UgiJoullie[D]'] = 'D NORMAL 1106.489350 1106.489350 0 H(60)C(47)N(23)O(10)'
mod_dict['UgiJoullie[E]'] = 'E NORMAL 1106.489350 1106.489350 0 H(60)C(47)N(23)O(10)'
mod_dict['UgiJoullieProGly[D]'] = 'D NORMAL 154.074228 154.074228 0 H(10)C(7)N(2)O(2)'
mod_dict['UgiJoullieProGly[E]'] = 'E NORMAL 154.074228 154.074228 0 H(10)C(7)N(2)O(2)'
mod_dict['UgiJoullieProGlyProGly[D]'] = 'D NORMAL 308.148455 308.148455 0 H(20)C(14)N(4)O(4)'
mod_dict['UgiJoullieProGlyProGly[E]'] = 'E NORMAL 308.148455 308.148455 0 H(20)C(14)N(4)O(4)'
mod_dict['VFQQQTGG[K]'] = 'K NORMAL 845.403166 845.403166 0 H(55)C(37)N(11)O(12)'
mod_dict['VIEVYQEQTGG[K]'] = 'K NORMAL 1203.577168 1203.577168 0 H(81)C(53)N(13)O(19)'
mod_dict['Val->Ala[V]'] = 'V NORMAL -28.031300 -28.031300 0 H(-4)C(-2)'
mod_dict['Val->Arg[V]'] = 'V NORMAL 57.032697 57.032697 0 H(3)C(1)N(3)'
mod_dict['Val->Asn[V]'] = 'V NORMAL 14.974514 14.974514 0 H(-3)C(-1)N(1)O(1)'
mod_dict['Val->Asp[V]'] = 'V NORMAL 15.958529 15.958529 0 H(-4)C(-1)O(2)'
mod_dict['Val->Cys[V]'] = 'V NORMAL 3.940771 3.940771 0 H(-4)C(-2)S(1)'
mod_dict['Val->Gln[V]'] = 'V NORMAL 28.990164 28.990164 0 H(-1)N(1)O(1)'
mod_dict['Val->Glu[V]'] = 'V NORMAL 29.974179 29.974179 0 H(-2)O(2)'
mod_dict['Val->Gly[V]'] = 'V NORMAL -42.046950 -42.046950 0 H(-6)C(-3)'
mod_dict['Val->His[V]'] = 'V NORMAL 37.990498 37.990498 0 H(-2)C(1)N(2)'
mod_dict['Val->Lys[V]'] = 'V NORMAL 29.026549 29.026549 0 H(3)C(1)N(1)'
mod_dict['Val->Met[V]'] = 'V NORMAL 31.972071 31.972071 0 S(1)'
mod_dict['Val->Phe[V]'] = 'V NORMAL 48.000000 48.000000 0 C(4)'
mod_dict['Val->Pro[V]'] = 'V NORMAL -2.015650 -2.015650 0 H(-2)'
mod_dict['Val->Ser[V]'] = 'V NORMAL -12.036386 -12.036386 0 H(-4)C(-2)O(1)'
mod_dict['Val->Thr[V]'] = 'V NORMAL 1.979265 1.979265 0 H(-2)C(-1)O(1)'
mod_dict['Val->Trp[V]'] = 'V NORMAL 87.010899 87.010899 0 H(1)C(6)N(1)'
mod_dict['Val->Tyr[V]'] = 'V NORMAL 63.994915 63.994915 0 C(4)O(1)'
mod_dict['Val->Xle[V]'] = 'V NORMAL 14.015650 14.015650 0 H(2)C(1)'
mod_dict['Withaferin[C]'] = 'C NORMAL 470.266839 470.266839 0 H(38)C(28)O(6)'
mod_dict['Xle->Ala[I]'] = 'I NORMAL -42.046950 -42.046950 0 H(-6)C(-3)'
mod_dict['Xle->Ala[L]'] = 'L NORMAL -42.046950 -42.046950 0 H(-6)C(-3)'
mod_dict['Xle->Arg[I]'] = 'I NORMAL 43.017047 43.017047 0 H(1)N(3)'
mod_dict['Xle->Arg[L]'] = 'L NORMAL 43.017047 43.017047 0 H(1)N(3)'
mod_dict['Xle->Asn[I]'] = 'I NORMAL 0.958863 0.958863 0 H(-5)C(-2)N(1)O(1)'
mod_dict['Xle->Asn[L]'] = 'L NORMAL 0.958863 0.958863 0 H(-5)C(-2)N(1)O(1)'
mod_dict['Xle->Asp[I]'] = 'I NORMAL 1.942879 1.942879 0 H(-6)C(-2)O(2)'
mod_dict['Xle->Asp[L]'] = 'L NORMAL 1.942879 1.942879 0 H(-6)C(-2)O(2)'
mod_dict['Xle->Cys[I]'] = 'I NORMAL -10.074880 -10.074880 0 H(-6)C(-3)S(1)'
mod_dict['Xle->Cys[L]'] = 'L NORMAL -10.074880 -10.074880 0 H(-6)C(-3)S(1)'
mod_dict['Xle->Gln[I]'] = 'I NORMAL 14.974514 14.974514 0 H(-3)C(-1)N(1)O(1)'
mod_dict['Xle->Gln[L]'] = 'L NORMAL 14.974514 14.974514 0 H(-3)C(-1)N(1)O(1)'
mod_dict['Xle->Glu[I]'] = 'I NORMAL 15.958529 15.958529 0 H(-4)C(-1)O(2)'
mod_dict['Xle->Glu[L]'] = 'L NORMAL 15.958529 15.958529 0 H(-4)C(-1)O(2)'
mod_dict['Xle->Gly[I]'] = 'I NORMAL -56.062600 -56.062600 0 H(-8)C(-4)'
mod_dict['Xle->Gly[L]'] = 'L NORMAL -56.062600 -56.062600 0 H(-8)C(-4)'
mod_dict['Xle->His[I]'] = 'I NORMAL 23.974848 23.974848 0 H(-4)N(2)'
mod_dict['Xle->His[L]'] = 'L NORMAL 23.974848 23.974848 0 H(-4)N(2)'
mod_dict['Xle->Lys[I]'] = 'I NORMAL 15.010899 15.010899 0 H(1)N(1)'
mod_dict['Xle->Lys[L]'] = 'L NORMAL 15.010899 15.010899 0 H(1)N(1)'
mod_dict['Xle->Met[I]'] = 'I NORMAL 17.956421 17.956421 0 H(-2)C(-1)S(1)'
mod_dict['Xle->Met[L]'] = 'L NORMAL 17.956421 17.956421 0 H(-2)C(-1)S(1)'
mod_dict['Xle->Phe[I]'] = 'I NORMAL 33.984350 33.984350 0 H(-2)C(3)'
mod_dict['Xle->Phe[L]'] = 'L NORMAL 33.984350 33.984350 0 H(-2)C(3)'
mod_dict['Xle->Pro[I]'] = 'I NORMAL -16.031300 -16.031300 0 H(-4)C(-1)'
mod_dict['Xle->Pro[L]'] = 'L NORMAL -16.031300 -16.031300 0 H(-4)C(-1)'
mod_dict['Xle->Ser[I]'] = 'I NORMAL -26.052036 -26.052036 0 H(-6)C(-3)O(1)'
mod_dict['Xle->Ser[L]'] = 'L NORMAL -26.052036 -26.052036 0 H(-6)C(-3)O(1)'
mod_dict['Xle->Thr[I]'] = 'I NORMAL -12.036386 -12.036386 0 H(-4)C(-2)O(1)'
mod_dict['Xle->Thr[L]'] = 'L NORMAL -12.036386 -12.036386 0 H(-4)C(-2)O(1)'
mod_dict['Xle->Trp[I]'] = 'I NORMAL 72.995249 72.995249 0 H(-1)C(5)N(1)'
mod_dict['Xle->Trp[L]'] = 'L NORMAL 72.995249 72.995249 0 H(-1)C(5)N(1)'
mod_dict['Xle->Tyr[I]'] = 'I NORMAL 49.979265 49.979265 0 H(-2)C(3)O(1)'
mod_dict['Xle->Tyr[L]'] = 'L NORMAL 49.979265 49.979265 0 H(-2)C(3)O(1)'
mod_dict['Xle->Val[I]'] = 'I NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Xle->Val[L]'] = 'L NORMAL -14.015650 -14.015650 0 H(-2)C(-1)'
mod_dict['Xlink_B10621[C]'] = 'C NORMAL 713.093079 713.093079 0 H(30)C(31)N(4)O(6)S(1)I(1)'
mod_dict['Xlink_DMP-de[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['Xlink_DMP-s[K]'] = 'K NORMAL 154.110613 154.110613 0 H(14)C(8)N(2)O(1)'
mod_dict['Xlink_DMP-s[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 154.110613 154.110613 0 H(14)C(8)N(2)O(1)'
mod_dict['Xlink_DMP[K]'] = 'K NORMAL 122.084398 122.084398 0 H(10)C(7)N(2)'
mod_dict['Xlink_DMP[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 122.084398 122.084398 0 H(10)C(7)N(2)'
mod_dict['Xlink_DSS[K]'] = 'K NORMAL 156.078644 156.078644 0 H(12)C(8)O(3)'
mod_dict['Xlink_DSS[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 156.078644 156.078644 0 H(12)C(8)O(3)'
mod_dict['Xlink_DST[K]'] = 'K NORMAL 132.005873 132.005873 0 H(4)C(4)O(5)'
mod_dict['Xlink_DST[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 132.005873 132.005873 0 H(4)C(4)O(5)'
mod_dict['Xlink_DTSSP[K]'] = 'K NORMAL 191.991486 191.991486 0 H(8)C(6)O(3)S(2)'
mod_dict['Xlink_DTSSP[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 191.991486 191.991486 0 H(8)C(6)O(3)S(2)'
mod_dict['Xlink_EGS[K]'] = 'K NORMAL 244.058303 244.058303 0 H(12)C(10)O(7)'
mod_dict['Xlink_EGS[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 244.058303 244.058303 0 H(12)C(10)O(7)'
mod_dict['Xlink_EGScleaved[K]'] = 'K NORMAL 99.032028 99.032028 0 H(5)C(4)N(1)O(2)'
mod_dict['Xlink_EGScleaved[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 99.032028 99.032028 0 H(5)C(4)N(1)O(2)'
mod_dict['Xlink_SMCC[C]'] = 'C NORMAL 237.100108 237.100108 0 H(15)C(12)N(1)O(4)'
mod_dict['Xlink_SSD[K]'] = 'K NORMAL 253.095023 253.095023 0 H(15)C(12)N(1)O(5)'
mod_dict['ZGB[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 758.380841 758.380841 0 H(53)B(1)C(37)N(6)O(6)F(2)S(1)'
mod_dict['ZGB[K]'] = 'K NORMAL 758.380841 758.380841 0 H(53)B(1)C(37)N(6)O(6)F(2)S(1)'
mod_dict['a-type-ion[AnyC-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_C -46.005479 -46.005479 0 H(-2)C(-1)O(-2)'
mod_dict['azole[C]'] = 'C NORMAL -20.026215 -20.026215 0 H(-4)O(-1)'
mod_dict['azole[S]'] = 'S NORMAL -20.026215 -20.026215 0 H(-4)O(-1)'
mod_dict['benzylguanidine[K]'] = 'K NORMAL 132.068748 132.068748 0 H(8)C(8)N(2)'
mod_dict['biotinAcrolein298[C]'] = 'C NORMAL 298.146347 298.146347 0 H(22)C(13)N(4)O(2)S(1)'
mod_dict['biotinAcrolein298[H]'] = 'H NORMAL 298.146347 298.146347 0 H(22)C(13)N(4)O(2)S(1)'
mod_dict['biotinAcrolein298[K]'] = 'K NORMAL 298.146347 298.146347 0 H(22)C(13)N(4)O(2)S(1)'
mod_dict['biotinAcrolein298[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 298.146347 298.146347 0 H(22)C(13)N(4)O(2)S(1)'
mod_dict['bisANS-sulfonates[K]'] = 'K NORMAL 437.201774 437.201774 0 H(25)C(32)N(2)'
mod_dict['bisANS-sulfonates[S]'] = 'S NORMAL 437.201774 437.201774 0 H(25)C(32)N(2)'
mod_dict['bisANS-sulfonates[T]'] = 'T NORMAL 437.201774 437.201774 0 H(25)C(32)N(2)'
mod_dict['cGMP+RMP-loss[C]'] = 'C NORMAL 150.041585 150.041585 0 H(4)C(5)N(5)O(1)'
mod_dict['cGMP+RMP-loss[S]'] = 'S NORMAL 150.041585 150.041585 0 H(4)C(5)N(5)O(1)'
mod_dict['cGMP[C]'] = 'C NORMAL 343.031785 343.031785 0 H(10)C(10)N(5)O(7)P(1)'
mod_dict['cGMP[S]'] = 'S NORMAL 343.031785 343.031785 0 H(10)C(10)N(5)O(7)P(1)'
mod_dict['cysTMT[C]'] = 'C NORMAL 299.166748 299.166748 0 H(25)C(14)N(3)O(2)S(1)'
mod_dict['cysTMT6plex[C]'] = 'C NORMAL 304.177202 304.177202 0 H(25)C(10)13C(4)N(2)15N(1)O(2)S(1)'
mod_dict['dHex(1)Hex(1)[S]'] = 'S NORMAL 308.110732 308.110732 0 dHex(1)Hex(1)'
mod_dict['dHex(1)Hex(1)[T]'] = 'T NORMAL 308.110732 308.110732 0 dHex(1)Hex(1)'
mod_dict['dHex(1)Hex(2)[S]'] = 'S NORMAL 470.163556 470.163556 0 dHex(1)Hex(2)'
mod_dict['dHex(1)Hex(2)[T]'] = 'T NORMAL 470.163556 470.163556 0 dHex(1)Hex(2)'
mod_dict['dHex(1)Hex(3)[S]'] = 'S NORMAL 632.216379 632.216379 0 dHex(1)Hex(3)'
mod_dict['dHex(1)Hex(3)[T]'] = 'T NORMAL 632.216379 632.216379 0 dHex(1)Hex(3)'
mod_dict['dHex(1)Hex(3)HexNAc(4)[N]'] = 'N NORMAL 1444.533870 1444.533870 0 dHex(1)Hex(3)HexNAc(4)'
mod_dict['dHex(1)Hex(4)[S]'] = 'S NORMAL 794.269203 794.269203 0 dHex(1)Hex(4)'
mod_dict['dHex(1)Hex(4)[T]'] = 'T NORMAL 794.269203 794.269203 0 dHex(1)Hex(4)'
mod_dict['dHex(1)Hex(4)HexNAc(4)[N]'] = 'N NORMAL 1606.586693 1606.586693 0 dHex(1)Hex(4)HexNAc(4)'
mod_dict['dHex(1)Hex(5)[S]'] = 'S NORMAL 956.322026 956.322026 0 dHex(1)Hex(5)'
mod_dict['dHex(1)Hex(5)[T]'] = 'T NORMAL 956.322026 956.322026 0 dHex(1)Hex(5)'
mod_dict['dHex(1)Hex(5)HexNAc(4)[N]'] = 'N NORMAL 1768.639517 1768.639517 0 dHex(1)Hex(5)HexNAc(4)'
mod_dict['dHex(1)Hex(6)[S]'] = 'S NORMAL 1118.374850 1118.374850 0 dHex(1)Hex(6)'
mod_dict['dHex(1)Hex(6)[T]'] = 'T NORMAL 1118.374850 1118.374850 0 dHex(1)Hex(6)'
mod_dict['dHex[N]'] = 'N NORMAL 146.057909 146.057909 0 dHex(1)'
mod_dict['dHex[S]'] = 'S NORMAL 146.057909 146.057909 0 dHex(1)'
mod_dict['dHex[T]'] = 'T NORMAL 146.057909 146.057909 0 dHex(1)'
mod_dict['dNIC[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 109.048119 109.048119 0 H(1)2H(3)C(6)N(1)O(1)'
mod_dict['dichlorination[C]'] = 'C NORMAL 69.937705 69.937705 0 Cl(2)'
mod_dict['dichlorination[Y]'] = 'Y NORMAL 69.937705 69.937705 0 Cl(2)'
mod_dict['ethylamino[S]'] = 'S NORMAL 27.047285 27.047285 0 H(5)C(2)N(1)O(-1)'
mod_dict['ethylamino[T]'] = 'T NORMAL 27.047285 27.047285 0 H(5)C(2)N(1)O(-1)'
mod_dict['ethylsulfonylethyl[C]'] = 'C NORMAL 120.024500 120.024500 0 H(8)C(4)O(2)S(1)'
mod_dict['ethylsulfonylethyl[H]'] = 'H NORMAL 120.024500 120.024500 0 H(8)C(4)O(2)S(1)'
mod_dict['ethylsulfonylethyl[K]'] = 'K NORMAL 120.024500 120.024500 0 H(8)C(4)O(2)S(1)'
mod_dict['glucosone[R]'] = 'R NORMAL 160.037173 160.037173 0 H(8)C(6)O(5)'
mod_dict['glycidamide[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 87.032028 87.032028 0 H(5)C(3)N(1)O(2)'
mod_dict['glycidamide[K]'] = 'K NORMAL 87.032028 87.032028 0 H(5)C(3)N(1)O(2)'
mod_dict['iTRAQ4plex[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['iTRAQ4plex114[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 144.105918 144.105918 0 H(12)C(5)13C(2)N(2)18O(1)'
mod_dict['iTRAQ4plex114[K]'] = 'K NORMAL 144.105918 144.105918 0 H(12)C(5)13C(2)N(2)18O(1)'
mod_dict['iTRAQ4plex114[Y]'] = 'Y NORMAL 144.105918 144.105918 0 H(12)C(5)13C(2)N(2)18O(1)'
mod_dict['iTRAQ4plex115[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 144.099599 144.099599 0 H(12)C(6)13C(1)N(1)15N(1)18O(1)'
mod_dict['iTRAQ4plex115[K]'] = 'K NORMAL 144.099599 144.099599 0 H(12)C(6)13C(1)N(1)15N(1)18O(1)'
mod_dict['iTRAQ4plex115[Y]'] = 'Y NORMAL 144.099599 144.099599 0 H(12)C(6)13C(1)N(1)15N(1)18O(1)'
mod_dict['iTRAQ8plex[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[H]'] = 'H NORMAL 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[K]'] = 'K NORMAL 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[S]'] = 'S NORMAL 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[T]'] = 'T NORMAL 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[Y]'] = 'Y NORMAL 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex[ProteinN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PRO_N 304.205360 304.205360 0 H(24)C(7)13C(7)N(3)15N(1)O(3)'
mod_dict['iTRAQ8plex_13C(6)15N(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 304.199040 304.199040 0 H(24)C(8)13C(6)N(2)15N(2)O(3)'
mod_dict['iTRAQ8plex_13C(6)15N(2)[K]'] = 'K NORMAL 304.199040 304.199040 0 H(24)C(8)13C(6)N(2)15N(2)O(3)'
mod_dict['iTRAQ8plex_13C(6)15N(2)[Y]'] = 'Y NORMAL 304.199040 304.199040 0 H(24)C(8)13C(6)N(2)15N(2)O(3)'
mod_dict['iodoTMT[C]'] = 'C NORMAL 324.216141 324.216141 0 H(28)C(16)N(4)O(3)'
mod_dict['iodoTMT[D]'] = 'D NORMAL 324.216141 324.216141 0 H(28)C(16)N(4)O(3)'
mod_dict['iodoTMT[E]'] = 'E NORMAL 324.216141 324.216141 0 H(28)C(16)N(4)O(3)'
mod_dict['iodoTMT[H]'] = 'H NORMAL 324.216141 324.216141 0 H(28)C(16)N(4)O(3)'
mod_dict['iodoTMT[K]'] = 'K NORMAL 324.216141 324.216141 0 H(28)C(16)N(4)O(3)'
mod_dict['iodoTMT6plex[C]'] = 'C NORMAL 329.226595 329.226595 0 H(28)C(12)13C(4)N(3)15N(1)O(3)'
mod_dict['iodoTMT6plex[D]'] = 'D NORMAL 329.226595 329.226595 0 H(28)C(12)13C(4)N(3)15N(1)O(3)'
mod_dict['iodoTMT6plex[E]'] = 'E NORMAL 329.226595 329.226595 0 H(28)C(12)13C(4)N(3)15N(1)O(3)'
mod_dict['iodoTMT6plex[H]'] = 'H NORMAL 329.226595 329.226595 0 H(28)C(12)13C(4)N(3)15N(1)O(3)'
mod_dict['iodoTMT6plex[K]'] = 'K NORMAL 329.226595 329.226595 0 H(28)C(12)13C(4)N(3)15N(1)O(3)'
mod_dict['lapachenole[C]'] = 'C NORMAL 240.115030 240.115030 0 H(16)C(16)O(2)'
mod_dict['mTRAQ[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ[H]'] = 'H NORMAL 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ[K]'] = 'K NORMAL 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ[S]'] = 'S NORMAL 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ[T]'] = 'T NORMAL 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ[Y]'] = 'Y NORMAL 140.094963 140.094963 0 H(12)C(7)N(2)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[H]'] = 'H NORMAL 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[K]'] = 'K NORMAL 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[S]'] = 'S NORMAL 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[T]'] = 'T NORMAL 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(3)15N(1)[Y]'] = 'Y NORMAL 144.102063 144.102063 0 H(12)C(4)13C(3)N(1)15N(1)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[AnyN-term]'] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ PEP_N 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[H]'] = 'H NORMAL 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[K]'] = 'K NORMAL 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[S]'] = 'S NORMAL 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[T]'] = 'T NORMAL 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['mTRAQ_13C(6)15N(2)[Y]'] = 'Y NORMAL 148.109162 148.109162 0 H(12)C(1)13C(6)15N(2)O(1)'
mod_dict['maleimide[C]'] = 'C NORMAL 97.016378 97.016378 0 H(3)C(4)N(1)O(2)'
mod_dict['maleimide[K]'] = 'K NORMAL 97.016378 97.016378 0 H(3)C(4)N(1)O(2)'
mod_dict['maleimide3[C]'] = 'C NORMAL 969.366232 969.366232 0 H(59)C(37)N(7)O(23)'
mod_dict['maleimide3[K]'] = 'K NORMAL 969.366232 969.366232 0 H(59)C(37)N(7)O(23)'
mod_dict['maleimide5[C]'] = 'C NORMAL 1293.471879 1293.471879 0 H(79)C(49)N(7)O(33)'
mod_dict['maleimide5[K]'] = 'K NORMAL 1293.471879 1293.471879 0 H(79)C(49)N(7)O(33)'
mod_dict['methylsulfonylethyl[C]'] = 'C NORMAL 106.008850 106.008850 0 H(6)C(3)O(2)S(1)'
mod_dict['methylsulfonylethyl[H]'] = 'H NORMAL 106.008850 106.008850 0 H(6)C(3)O(2)S(1)'
mod_dict['methylsulfonylethyl[K]'] = 'K NORMAL 106.008850 106.008850 0 H(6)C(3)O(2)S(1)'
mod_dict['phenylsulfonylethyl[C]'] = 'C NORMAL 168.024500 168.024500 0 H(8)C(8)O(2)S(1)'
mod_dict['phosphoRibosyl[D]'] = 'D NORMAL 282.023960 282.023960 0 H(9)C(5)N(5)O(7)P(1)'
mod_dict['phosphoRibosyl[E]'] = 'E NORMAL 282.023960 282.023960 0 H(9)C(5)N(5)O(7)P(1)'
mod_dict['phosphoRibosyl[R]'] = 'R NORMAL 282.023960 282.023960 0 H(9)C(5)N(5)O(7)P(1)'
mod_dict['probiotinhydrazide[P]'] = 'P NORMAL 258.115047 258.115047 0 H(18)C(10)N(4)O(2)S(1)'
mod_dict['pupylation[K]'] = 'K NORMAL 243.085521 243.085521 0 H(13)C(9)N(3)O(5)'
mod_dict['pyrophospho[S]'] = 'S NORMAL 159.932662 159.932662 1 176.935402 176.935402 H(2)O(6)P(2)'
mod_dict['pyrophospho[T]'] = 'T NORMAL 159.932662 159.932662 1 176.935402 176.935402 H(2)O(6)P(2)'
mod_dict['sulfo+amino[Y]'] = 'Y NORMAL 94.967714 94.967714 0 H(1)N(1)O(3)S(1)'
mod_dict['thioacylPA[K]'] = 'K NORMAL 159.035399 159.035399 0 H(9)C(6)N(1)O(2)S(1)'
mod_dict['trifluoro[L]'] = 'L NORMAL 53.971735 53.971735 0 H(-3)F(3)'
mod_dict['Glutaryl[K]'] = 'K NORMAL 114.031694 114.031694 0 H(6)C(5)O(3)'
mod_dict['Hydroxyisobutyryl[K]'] = 'K NORMAL 86.036779 86.036779 0 H(6)C(4)O(2)'
mod_dict['Malonyl[K]'] = 'K NORMAL 86.000394 86.000394 1 43.9898 43.9898 H(2)C(3)O(3)'
mod_dict['Trimethyl[K]'] = 'K NORMAL 42.046950 42.046950 0 H(6)C(3)'
mod_dict['Hydroxyproline[P]'] = 'P NORMAL 148.037173 148.037173 0 H(6)C(3)'
mod_dict['Dimethyl[K]'] = 'K NORMAL 28.031300 28.031300 0 H(4)C(2)'
return mod_dict
| 88.088326
| 146
| 0.635144
| 31,185
| 142,615
| 2.834953
| 0.050024
| 0.128031
| 0.09058
| 0.039702
| 0.814372
| 0.73844
| 0.693568
| 0.66503
| 0.649681
| 0.642487
| 0
| 0.30576
| 0.136402
| 142,615
| 1,619
| 147
| 88.088326
| 0.412059
| 0.00014
| 0
| 0
| 0
| 0.761434
| 0.750352
| 0.1539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000618
| false
| 0
| 0
| 0
| 0.001236
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a7872d692a6f8a693c263e90ee24962820be405
| 13,149
|
py
|
Python
|
PCRF_sim/tests_PGW_CCR_Gx_client.py
|
fertiland/pyprotosim
|
b329c060f1cd521e264da8416249a02429f432f3
|
[
"BSD-2-Clause"
] | 12
|
2017-11-07T12:45:43.000Z
|
2022-02-10T12:36:49.000Z
|
PCRF_sim/tests_PGW_CCR_Gx_client.py
|
fertiland/pyprotosim
|
b329c060f1cd521e264da8416249a02429f432f3
|
[
"BSD-2-Clause"
] | 1
|
2019-02-12T09:25:55.000Z
|
2019-02-12T09:25:55.000Z
|
PCRF_sim/tests_PGW_CCR_Gx_client.py
|
fertiland/pyprotosim
|
b329c060f1cd521e264da8416249a02429f432f3
|
[
"BSD-2-Clause"
] | 5
|
2018-09-19T09:46:50.000Z
|
2020-08-20T09:46:53.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# 2014, PGW client tests are added by L.Belov <lavrbel@gmail.com>
# February 2012 - March 2014
# Version 0.1.0, Last change on Mar 12, 2014
# This software is distributed under the terms of BSD license.
###############################################################################
## FLOWS AND DESCRIPTION:
## Capabilities Exchange:
#1) PGW ---> CER -----> PCRF
#2) PGW <--- CEA <----- PCRF
## CCR Initial to PCRF, PCRF checks user is valid in SPR DB
## and replies with a PCC Charging-Install rule and QoS profile settings
#3) PGW ---> CCR-I ---> PCRF
#4) PCRF ---> SPR
#5) PGW <--- CCA-I <-- PCRF <--- SPR (PCC rule)
## CCR Update to PCRF, PCRF checks user is valid in SPR DB
## and replies with a PCC Charging-Install rule and QoS profile settings
#6) PGW ---> CCR-U ---> PCRF ---> SPR
#7) PCRF <--- SPR (PCC rule)
#8) PGW <--- CCA-U <--- PCRF
## CCR Terminate to PCRF, PCRF terminates and reply with 2001 Success
#9) PGW ---> CCR-T ---> PCRF
## Device-Watchdog Request to PCRF and 2001 Success Answer
#10) PGW ----> DWR ----> PCRF
#11) PGW <---- DWA <---- PCRF
## Disconnect Peer Request to PCRF and 2001 Success Answer
#12) PGW ---> DPR ----> PCRF
#13) PGW <--- DPA <---- PCRF
#################################################################
#Next two lines are to include parent directory for testing
import sys, time, os, subprocess
sys.path.append("..")
# Remove this sys.path hack when running outside the test tree
# PGW client - Gx protocol for tests with PCRF simulator
from libDiameter import *
if __name__ == '__main__':
# SET THIS TO YOUR PCRF SIMULATOR IP/PORT
HOST="127.0.0.1"
PORT=3868
ORIGIN_HOST="pgw.myrealm.example"
ORIGIN_REALM="myrealm.example"
DEST_REALM="myrealm.example"
DEST_HOST="pcrf.myrealm.example"
IDENTITY="1234567890" # This is msisdn of user in SPR DB
Conn=Connect(HOST,PORT)
LoadDictionary("../dictDiameter.xml")
###### FIRST WE CREATE CER and receive CEA ###########################
# Let's build CER
CER_avps=[ ]
CER_avps.append(encodeAVP('Origin-Host', 'pgw.myrealm.example'))
CER_avps.append(encodeAVP('Origin-Realm', 'myrealm.example'))
CER_avps.append(encodeAVP('Vendor-Id', 11111))
CER_avps.append(encodeAVP('Product-Name', 'PCEF'))
CER_avps.append(encodeAVP('Supported-Vendor-Id', 0))
CER_avps.append(encodeAVP('Supported-Vendor-Id', 10415))
CER_avps.append(encodeAVP('Supported-Vendor-Id', 11111))
CER_avps.append(encodeAVP('Auth-Application-Id', 16777238))
# Create message header (empty)
CER=HDRItem()
# Set command code
CER.cmd=dictCOMMANDname2code('Capabilities-Exchange')
# Set Hop-by-Hop and End-to-End
initializeHops(CER)
# Add AVPs to header and calculate remaining fields
msg=createReq(CER,CER_avps)
# msg now contains CER Request as hex string
# send data
Conn.send(msg.decode('hex'))
# Receive response
received = Conn.recv(1024)
# Parse and display received CEA ANSWER
print "="*30
print "THE CEA ANSWER IS:"
msg=received.encode('hex')
print "="*30
H=HDRItem()
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
print 'Unknown command',H.cmd
else:
print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
for avp in avps:
# print "RAW AVP",avp
print "Decoded AVP",decodeAVP(avp)
print "-"*30
print "SLEEP 2 sec"
time.sleep(2)
################## NEXT WE SEND CCR-I AND GET AN ANSWER FROM PCRF ###########################################
CCR_avps=[ ]
CCR_avps.append(encodeAVP('Origin-Host', ORIGIN_HOST))
CCR_avps.append(encodeAVP('Session-Id', 'pgw.myrealm.example;1094791309121_1385989500_428022'))
CCR_avps.append(encodeAVP('Called-Station-Id', 'test.apn'))
CCR_avps.append(encodeAVP('Origin-Realm', ORIGIN_REALM))
CCR_avps.append(encodeAVP('Destination-Realm', DEST_REALM))
CCR_avps.append(encodeAVP('Destination-Host', DEST_HOST))
CCR_avps.append(encodeAVP('Auth-Application-Id', 16777238))
CCR_avps.append(encodeAVP('CC-Request-Type', 1))
CCR_avps.append(encodeAVP('CC-Request-Number', 0))
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', IDENTITY ), encodeAVP('Subscription-Id-Type', 0)]))
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', '123456789101112'), encodeAVP('Subscription-Id-Type', 1)]))
CCR_avps.append(encodeAVP('3GPP-SGSN-Address', '192.168.0.2'))
CCR_avps.append(encodeAVP('3GPP-MS-TimeZone', 'GMT'))
CCR_avps.append(encodeAVP('3GPP-User-Location-Info', 'etwas'))
CCR_avps.append(encodeAVP('QoS-Information', [encodeAVP('APN-Aggregate-Max-Bitrate-UL', '500000000'), encodeAVP('APN-Aggregate-Max-Bitrate-DL', '1000000000')]))
CCR_avps.append(encodeAVP('3GPP-SGSN-MCC-MNC', '12345'))
CCR_avps.append(encodeAVP('Access-Network-Charging-Address', '192.168.0.1'))
# Create message header (empty)
# 3GPP Gx=16777238
# Create message header (empty)
CCR=HDRItem()
# Set command code
CCR.cmd=dictCOMMANDname2code('Credit-Control')
# Set Hop-by-Hop and End-to-End
initializeHops(CCR)
# Add AVPs to header and calculate remaining fields
msg1=createReq(CCR,CCR_avps)
# msg now contains CCR Request as hex string
# send data
Conn.send(msg1.decode('hex'))
# Receive response
received1 = Conn.recv(1024)
# Parse and display received ANSWER
print "="*30
print "THE CCA - I ANSWER IS:"
msg=received1.encode('hex')
print "="*30
H=HDRItem()
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
print 'Unknown command',H.cmd
else:
print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
for avp in avps:
#print "RAW AVP",avp
print "Decoded AVP",decodeAVP(avp)
print "-"*30
print "SLEEP 2 sec"
time.sleep(2)
#################### NOW SEND CCR-U REQUEST TO PCRF AND RECEIVE CCA-U ANSWER#############################################
CCR_avps=[ ]
CCR_avps.append(encodeAVP('Origin-Host', ORIGIN_HOST))
CCR_avps.append(encodeAVP('Session-Id', 'pgw.myrealm.example;1094791309121_1385989500_428022'))
CCR_avps.append(encodeAVP('Called-Station-Id', 'test.apn'))
CCR_avps.append(encodeAVP('Origin-Realm', ORIGIN_REALM))
CCR_avps.append(encodeAVP('Destination-Realm', DEST_REALM))
CCR_avps.append(encodeAVP('Destination-Host', DEST_HOST))
CCR_avps.append(encodeAVP('Auth-Application-Id', 16777238))
CCR_avps.append(encodeAVP('CC-Request-Type', 2))
CCR_avps.append(encodeAVP('CC-Request-Number', 1))
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', IDENTITY ), encodeAVP('Subscription-Id-Type', 0)]))
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', '123456789101112'), encodeAVP('Subscription-Id-Type', 1)]))
CCR_avps.append(encodeAVP('3GPP-SGSN-Address', '192.168.0.2'))
CCR_avps.append(encodeAVP('3GPP-MS-TimeZone', 'GMT'))
CCR_avps.append(encodeAVP('3GPP-User-Location-Info', 'etwas'))
CCR_avps.append(encodeAVP('QoS-Information', [encodeAVP('APN-Aggregate-Max-Bitrate-UL', '500000000'), encodeAVP('APN-Aggregate-Max-Bitrate-DL', '1000000000')]))
CCR_avps.append(encodeAVP('3GPP-SGSN-MCC-MNC', '12345'))
CCR_avps.append(encodeAVP('Access-Network-Charging-Address', '192.168.0.1'))
# Create message header (empty)
# 3GPP Gx=16777238
# Create message header (empty)
CCR=HDRItem()
# Set command code
CCR.cmd=dictCOMMANDname2code('Credit-Control')
# Set Hop-by-Hop and End-to-End
initializeHops(CCR)
# Add AVPs to header and calculate remaining fields
msg1=createReq(CCR,CCR_avps)
# msg now contains CCR Request as hex string
# send data
Conn.send(msg1.decode('hex'))
# Receive response
received1 = Conn.recv(1024)
# Parse and display received ANSWER
print "="*30
print "THE CCA - U ANSWER IS:"
msg=received1.encode('hex')
print "="*30
H=HDRItem()
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
print 'Unknown command',H.cmd
else:
print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
for avp in avps:
#print "RAW AVP",avp
print "Decoded AVP",decodeAVP(avp)
print "-"*30
print "SLEEP 2 sec"
time.sleep(2)
########test RAR-U
subprocess.call("./test_push_RAR-U.py", shell=True)
#################### NOW SEND CCR-T REQUEST TO PCRF AND RECEIVE CCA-T ANSWER #############################################
CCR_avps=[ ]
CCR_avps.append(encodeAVP('Origin-Host', ORIGIN_HOST))
# ------------------------------------------------------------------
# Build and send a Gx Credit-Control-Request of type TERMINATION,
# then parse and display the CCA answer.
# NOTE(review): CCR_avps, ORIGIN_REALM, DEST_REALM, DEST_HOST,
# IDENTITY and the connected socket `Conn` are defined earlier in the
# script, outside this excerpt.  Python 2 only (print statements and
# str.decode('hex')).
# ------------------------------------------------------------------
CCR_avps.append(encodeAVP('Session-Id', 'pgw.myrealm.example;1094791309121_1385989500_428022'))
CCR_avps.append(encodeAVP('Called-Station-Id', 'test.apn'))
CCR_avps.append(encodeAVP('Origin-Realm', ORIGIN_REALM))
CCR_avps.append(encodeAVP('Destination-Realm', DEST_REALM))
CCR_avps.append(encodeAVP('Destination-Host', DEST_HOST))
# 16777238 is the 3GPP Gx application id
CCR_avps.append(encodeAVP('Auth-Application-Id', 16777238))
# CC-Request-Type 3 = TERMINATION_REQUEST
CCR_avps.append(encodeAVP('CC-Request-Type', 3))
CCR_avps.append(encodeAVP('CC-Request-Number', 2))
# Subscription-Id-Type: 0 = END_USER_E164 (MSISDN), 1 = END_USER_IMSI
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', IDENTITY ), encodeAVP('Subscription-Id-Type', 0)]))
CCR_avps.append(encodeAVP('Subscription-Id',[encodeAVP('Subscription-Id-Data', '123456789101112'), encodeAVP('Subscription-Id-Type', 1)]))
CCR_avps.append(encodeAVP('3GPP-SGSN-Address', '192.168.0.2'))
CCR_avps.append(encodeAVP('3GPP-MS-TimeZone', 'GMT'))
CCR_avps.append(encodeAVP('3GPP-User-Location-Info', 'etwas'))
CCR_avps.append(encodeAVP('QoS-Information', [encodeAVP('APN-Aggregate-Max-Bitrate-UL', '500000000'), encodeAVP('APN-Aggregate-Max-Bitrate-DL', '1000000000')]))
CCR_avps.append(encodeAVP('3GPP-SGSN-MCC-MNC', '12345'))
CCR_avps.append(encodeAVP('Access-Network-Charging-Address', '192.168.0.1'))
# Create message header (empty)
# 3GPP Gx=16777238
# Create message header (empty)
CCR=HDRItem()
# Set command code
CCR.cmd=dictCOMMANDname2code('Credit-Control')
# Set Hop-by-Hop and End-to-End
initializeHops(CCR)
# Add AVPs to header and calculate remaining fields
msg1=createReq(CCR,CCR_avps)
# msg now contains CCR Request as hex string
# send data (Python 2: str.decode('hex') converts the hex string to raw bytes)
Conn.send(msg1.decode('hex'))
# Receive response
received1 = Conn.recv(1024)
# Parse and display received ANSWER
print "="*30
print "THE CCA - T ANSWER IS:"
# Work on the hex-string representation, as the library expects
msg=received1.encode('hex')
print "="*30
H=HDRItem()
# Split header fields into H; the AVP payload remains in H.msg
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
    print 'Unknown command',H.cmd
else:
    print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
# Decode and print every AVP of the answer
for avp in avps:
    #print "RAW AVP",avp
    print "Decoded AVP",decodeAVP(avp)
print "-"*30
print "SLEEP 2 sec"
time.sleep(2)
################## NOW Watchdog and response ########################
# Build and send a Device-Watchdog-Request (DWR) — the Diameter
# keep-alive — then parse and display the DWA answer.
# Let's build DWR
DWR_avps=[ ]
DWR_avps.append(encodeAVP('Origin-Host', 'pgw.myrealm.example'))
DWR_avps.append(encodeAVP('Origin-Realm', 'myrealm.example'))
DWR=HDRItem()
DWR.cmd=dictCOMMANDname2code('Device-Watchdog')
# Set fresh Hop-by-Hop / End-to-End identifiers for this request
initializeHops(DWR)
msg=createReq(DWR,DWR_avps)
# Python 2: hex string -> raw bytes on the wire
Conn.send(msg.decode('hex'))
# Receive response
received = Conn.recv(1024)
# Parse and display received ANSWER
print "="*30
print "THE DWA ANSWER IS:"
msg=received.encode('hex')
print "="*30
H=HDRItem()
# Split header fields into H; the AVP payload remains in H.msg
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
    print 'Unknown command',H.cmd
else:
    print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
# Decode and print every AVP of the answer
for avp in avps:
    # print "RAW AVP",avp
    print "Decoded AVP",decodeAVP(avp)
print "-"*30
print "SLEEP 2 sec"
time.sleep(2)
################## NOW DISCONNECT PEER REQUEST #######################
# Build and send a Disconnect-Peer-Request (DPR) to tear down the
# Diameter peering gracefully, display the DPA answer, then close
# the transport connection.
# Let's build DPR
DPR_avps=[ ]
DPR_avps.append(encodeAVP('Origin-Host', 'pgw.myrealm.example'))
DPR_avps.append(encodeAVP('Origin-Realm', 'myrealm.example'))
# Disconnect-Cause is a non-standard/test value here
DPR_avps.append(encodeAVP('Disconnect-Cause', 'DO_NOT_WANT_TO_TALK_TO_YOU')) # tired :)
DPR=HDRItem()
DPR.cmd=dictCOMMANDname2code('Disconnect-Peer')
# Set fresh Hop-by-Hop / End-to-End identifiers for this request
initializeHops(DPR)
msg=createReq(DPR,DPR_avps)
# Python 2: hex string -> raw bytes on the wire
Conn.send(msg.decode('hex'))
# Receive response
received = Conn.recv(1024)
# Parse and display received ANSWER
print "="*30
print "THE DPA ANSWER IS:"
msg=received.encode('hex')
print "="*30
H=HDRItem()
# Split header fields into H; the AVP payload remains in H.msg
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
    print 'Unknown command',H.cmd
else:
    print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
print "="*30
# Decode and print every AVP of the answer
for avp in avps:
    # print "RAW AVP",avp
    print "Decoded AVP",decodeAVP(avp)
print "-"*30
################## Closing connection ##############################
Conn.close()
| 33.037688
| 161
| 0.671154
| 1,803
| 13,149
| 4.831392
| 0.152524
| 0.07347
| 0.139594
| 0.128803
| 0.824245
| 0.814258
| 0.802434
| 0.743543
| 0.719894
| 0.715762
| 0
| 0.04669
| 0.135067
| 13,149
| 397
| 162
| 33.120907
| 0.719247
| 0.230284
| 0
| 0.763949
| 0
| 0
| 0.308781
| 0.058615
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.008584
| null | null | 0.253219
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8a7f51ab3efbf55ca7f402d03aab0af001d7326a
| 27,620
|
py
|
Python
|
tests/python/oper/test_stl_ct_boolean_and_temporal_offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | null | null | null |
tests/python/oper/test_stl_ct_boolean_and_temporal_offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | null | null | null |
tests/python/oper/test_stl_ct_boolean_and_temporal_offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | 1
|
2022-01-28T15:59:05.000Z
|
2022-01-28T15:59:05.000Z
|
import unittest
from rtamt.operation.stl.dense_time.offline.and_operation import AndOperation
from rtamt.operation.stl.dense_time.offline.not_operation import NotOperation
from rtamt.operation.stl.dense_time.offline.or_operation import OrOperation
from rtamt.operation.stl.dense_time.offline.implies_operation import ImpliesOperation
from rtamt.operation.stl.dense_time.offline.iff_operation import IffOperation
from rtamt.operation.stl.dense_time.offline.xor_operation import XorOperation
from rtamt.operation.stl.dense_time.offline.always_operation import AlwaysOperation
from rtamt.operation.stl.dense_time.offline.historically_operation import HistoricallyOperation
from rtamt.operation.stl.dense_time.offline.once_operation import OnceOperation
from rtamt.operation.stl.dense_time.offline.since_operation import SinceOperation
from rtamt.operation.stl.dense_time.offline.once_bounded_operation import OnceBoundedOperation
from rtamt.operation.stl.dense_time.offline.historically_bounded_operation import HistoricallyBoundedOperation
from rtamt.operation.stl.dense_time.offline.since_bounded_operation import SinceBoundedOperation
class TestSTLBooleanAndTemporalOffline(unittest.TestCase):
    """Offline (batch) tests for STL dense-time Boolean and temporal operators.

    A signal is a list of ``[time, value]`` pairs.  Each operator's
    ``update`` consumes one or two whole signals and returns the computed
    robustness signal, which is compared against a hand-derived expectation.

    Refactor note: the identical three-line ``assertListEqual`` boilerplate
    was repeated for every example; it is now centralized in ``_check`` and
    the multi-example tests are data-driven.  Expected data and operator
    instantiation patterns (fresh instance per example vs. one reused
    instance) are unchanged from the original suite.
    """

    def __init__(self, *args, **kwargs):
        super(TestSTLBooleanAndTemporalOffline, self).__init__(*args, **kwargs)

    def _check(self, ordinal, out_expected, out_computed):
        # Shared assertion helper: fail with a uniform message naming the
        # example ('1st', '2nd', ...) so failures are easy to locate.
        self.assertListEqual(
            out_expected, out_computed,
            "Problem with %s example:\nExpected output: %s\nComputed output: %s"
            % (ordinal, out_expected, out_computed))

    def test_and(self):
        """AND over dense-time signals (seven examples)."""
        cases = [
            # (ordinal, in_data_1, in_data_2, out_expected)
            ('1st', [[2, 2], [3.3, 3], [5.7, 4]],
                    [[2.5, 5], [4.7, 6]],
                    [[2.5, 2], [3.3, 3], [4.7, 3]]),
            ('2nd', [[2, 2], [3.3, 3], [4.7, 5]],
                    [[2, 1], [3.3, 5], [4.7, 2]],
                    [[2, 1], [3.3, 3], [4.7, 2]]),
            ('3rd', [[2, 2], [3.3, 3], [4.7, 5], [5, 5]],
                    [[2, 1], [3.3, 5], [4.7, 3], [5, 5]],
                    [[2, 1], [3.3, 3], [5, 5]]),
            # Disjoint time ranges produce an empty result.
            ('4th', [[2, 2], [3.3, 3]], [[4.7, 3], [5, 5]], []),
            # An empty operand produces an empty result.
            ('5th', [], [[4.7, 3], [5, 5]], []),
            ('6th', [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]],
                    [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]],
                    [[1.2, 1], [3.7, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 3], [7.5, 2], [8.1, 4]]),
            # Signals touching at a single point only.
            ('7th', [[1, 1], [2, 2], [3, 3]], [[3, 1], [4, 3], [7.5, 2]], []),
        ]
        for ordinal, in_data_1, in_data_2, out_expected in cases:
            oper = AndOperation()  # fresh instance per example, as in the original
            self._check(ordinal, out_expected, oper.update(in_data_1, in_data_2))

    def test_or(self):
        """OR over dense-time signals (single example)."""
        oper = OrOperation()
        in_data_1 = [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]]
        in_data_2 = [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]]
        out_expected = [[1.2, 2], [3.7, 3], [6.7, 4], [8.1, 6]]
        self._check('1st', out_expected, oper.update(in_data_1, in_data_2))

    def test_iff(self):
        """IFF over dense-time signals (single example)."""
        oper = IffOperation()
        in_data_1 = [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]]
        in_data_2 = [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]]
        out_expected = [[1.2, -1], [4.1, -2], [5, -1], [6.1, -2], [6.7, -1], [7.5, -2], [8.1, -2]]
        self._check('1st', out_expected, oper.update(in_data_1, in_data_2))

    def test_xor(self):
        """XOR over dense-time signals (single example)."""
        oper = XorOperation()
        in_data_1 = [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]]
        in_data_2 = [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]]
        out_expected = [[1.2, 1], [4.1, 2], [5, 1], [6.1, 2], [6.7, 1], [7.5, 2], [8.1, 2]]
        self._check('1st', out_expected, oper.update(in_data_1, in_data_2))

    def test_implies(self):
        """IMPLIES over dense-time signals (single example)."""
        oper = ImpliesOperation()
        in_data_1 = [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]]
        in_data_2 = [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]]
        out_expected = [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]]
        self._check('1st', out_expected, oper.update(in_data_1, in_data_2))

    def test_always(self):
        """Unbounded ALWAYS (five examples, incl. empty and singleton input)."""
        cases = [
            # (ordinal, in_data, out_expected)
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [5.3, 1], [10, 1]]),
            ('2nd', [[5, 3]], [[5, 3]]),
            ('3rd', [], []),
            ('4th', [[5, 3], [6, 2], [7, 1]], [[5, 3], [6, 2], [7, 1]]),
            ('5th', [[5, 3], [6, 4], [7, 5]], [[5, 3], [7, 3]]),
        ]
        for ordinal, in_data, out_expected in cases:
            oper = AlwaysOperation()  # fresh instance per example, as in the original
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_historically(self):
        """Unbounded HISTORICALLY (same five examples as test_always)."""
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [5.3, 1], [10, 1]]),
            ('2nd', [[5, 3]], [[5, 3]]),
            ('3rd', [], []),
            ('4th', [[5, 3], [6, 2], [7, 1]], [[5, 3], [6, 2], [7, 1]]),
            ('5th', [[5, 3], [6, 4], [7, 5]], [[5, 3], [7, 3]]),
        ]
        for ordinal, in_data, out_expected in cases:
            oper = HistoricallyOperation()  # fresh instance per example
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_once(self):
        """Unbounded ONCE (five examples, incl. empty and singleton input)."""
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [6.5, 5], [6.75, 6], [10, 6]]),
            ('2nd', [[5, 3]], [[5, 3]]),
            ('3rd', [], []),
            ('4th', [[5, 3], [6, 2], [7, 1]], [[5, 3], [7, 3]]),
            ('5th', [[5, 3], [6, 4], [7, 5]], [[5, 3], [6, 4], [7, 5]]),
        ]
        for ordinal, in_data, out_expected in cases:
            oper = OnceOperation()  # fresh instance per example
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_since(self):
        """Unbounded SINCE (three examples)."""
        cases = [
            # (ordinal, in_data_1, in_data_2, out_expected)
            ('1st', [[0, 3], [2, 4], [4, 6]],
                    [[0, -1], [2, 5], [4, 6]],
                    [[0, -1], [2, 4], [4, 6]]),
            ('2nd', [[0, 6], [2, 9], [4, 6]],
                    [[0, 7], [2, -5], [4, 6]],
                    [[0, 6], [4, 6]]),
            ('3rd', [[1, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 4], [9.9, 5]],
                    [[1.2, 1], [3.7, 3], [7.5, 2], [8.1, 6]],
                    [[1.2, 1], [3.7, 2], [4.1, 1], [5, 2], [6.1, 1], [6.7, 3], [8.1, 4]]),
        ]
        for ordinal, in_data_1, in_data_2, out_expected in cases:
            oper = SinceOperation()  # fresh instance per example
            self._check(ordinal, out_expected, oper.update(in_data_1, in_data_2))

    def test_once_0_1(self):
        """Bounded ONCE[0,1] (eight examples on one reused operator)."""
        # One instance reused across all examples, as in the original suite.
        oper = OnceBoundedOperation(0, 1)
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [6.3, 2], [6.5, 5], [6.75, 6], [10, 5], [10.25, 4], [11, 2]]),
            ('2nd', [[0, 1], [0.5, 2], [1, 3], [1.5, 4], [2, 5]],
                    [[0, 1], [0.5, 2], [1, 3], [1.5, 4], [3, 5]]),
            ('3rd', [[0, 5], [0.5, 4], [1, 3], [1.5, 2], [2, 1]],
                    [[0, 5], [1.5, 4], [2, 3], [2.5, 2], [3, 1]]),
            ('4th', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 1], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [6.3, 2], [6.5, 5], [7.75, 1], [9, 5], [10.25, 4], [11, 2]]),
            ('5th', [[6, 2], [8, 1], [8.1, 2], [10, 3]],
                    [[6, 2], [11, 3]]),
            ('6th', [[6, 2], [8, 3], [8.1, 2], [10, 3]],
                    [[6, 2], [8, 3], [9.1, 2], [11, 3]]),
            ('7th', [], []),
            # A signal shorter than the bound yields an empty result.
            ('8th', [[2, 5]], []),
        ]
        for ordinal, in_data, out_expected in cases:
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_once_1_2(self):
        """Bounded ONCE[1,2] (eight examples on one reused operator)."""
        oper = OnceBoundedOperation(1, 2)
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[6, 3], [7.3, 2], [7.5, 5], [7.75, 6], [11, 5], [11.25, 4], [12, 2]]),
            ('2nd', [[0, 1], [0.5, 2], [1, 3], [1.5, 4], [2, 5]],
                    [[1, 1], [1.5, 2], [2, 3], [2.5, 4], [4, 5]]),
            ('3rd', [[0, 5], [0.5, 4], [1, 3], [1.5, 2], [2, 1]],
                    [[1, 5], [2.5, 4], [3, 3], [3.5, 2], [4, 1]]),
            ('4th', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 1], [9, 5], [9.25, 4], [10, 2]],
                    [[6, 3], [7.3, 2], [7.5, 5], [8.75, 1], [10, 5], [11.25, 4], [12, 2]]),
            ('5th', [[6, 2], [8, 1], [8.1, 2], [10, 3]],
                    [[7, 2], [12, 3]]),
            ('6th', [[6, 2], [8, 3], [8.1, 2], [10, 3]],
                    [[7, 2], [9, 3], [10.1, 2], [12, 3]]),
            ('7th', [], []),
            ('8th', [[2, 5]], []),
        ]
        for ordinal, in_data, out_expected in cases:
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_historically_0_1(self):
        """Bounded HISTORICALLY[0,1] (eight examples on one reused operator)."""
        oper = HistoricallyBoundedOperation(0, 1)
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [5.3, 1], [6.75, 2], [7.5, 5], [7.75, 6], [9, 5], [9.25, 4], [11, 2]]),
            ('2nd', [[0, 1], [0.5, 2], [1, 3], [1.5, 4], [2, 5]],
                    [[0, 1], [1.5, 2], [2, 3], [2.5, 4], [3, 5]]),
            ('3rd', [[0, 5], [0.5, 4], [1, 3], [1.5, 2], [2, 1]],
                    [[0, 5], [0.5, 4], [1, 3], [1.5, 2], [3, 1]]),
            ('4th', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 1], [9, 5], [9.25, 4], [10, 2]],
                    [[5, 3], [5.3, 1], [10, 4], [11, 2]]),
            ('5th', [[6, 2], [8, 1], [8.1, 2], [10, 3]],
                    [[6, 2], [8, 1], [9.1, 2], [11, 3]]),
            ('6th', [[6, 2], [8, 3], [8.1, 2], [10, 3]],
                    [[6, 2], [11, 3]]),
            ('7th', [], []),
            ('8th', [[2, 5]], []),
        ]
        for ordinal, in_data, out_expected in cases:
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_historically_1_2(self):
        """Bounded HISTORICALLY[1,2] (eight examples on one reused operator)."""
        oper = HistoricallyBoundedOperation(1, 2)
        cases = [
            ('1st', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]],
                    [[6, 3], [6.3, 1], [7.75, 2], [8.5, 5], [8.75, 6], [10, 5], [10.25, 4], [12, 2]]),
            ('2nd', [[0, 1], [0.5, 2], [1, 3], [1.5, 4], [2, 5]],
                    [[1, 1], [2.5, 2], [3, 3], [3.5, 4], [4, 5]]),
            ('3rd', [[0, 5], [0.5, 4], [1, 3], [1.5, 2], [2, 1]],
                    [[1, 5], [1.5, 4], [2, 3], [2.5, 2], [4, 1]]),
            ('4th', [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 1], [9, 5], [9.25, 4], [10, 2]],
                    [[6, 3], [6.3, 1], [11, 4], [12, 2]]),
            ('5th', [[6, 2], [8, 1], [8.1, 2], [10, 3]],
                    [[7, 2], [9, 1], [10.1, 2], [12, 3]]),
            ('6th', [[6, 2], [8, 3], [8.1, 2], [10, 3]],
                    [[7, 2], [12, 3]]),
            ('7th', [], []),
            ('8th', [[2, 5]], []),
        ]
        for ordinal, in_data, out_expected in cases:
            self._check(ordinal, out_expected, oper.update(in_data))

    def test_since_0_1(self):
        """Bounded SINCE[0,1]: currently only exercises update() for crashes."""
        oper = SinceBoundedOperation(0, 1)
        in_data_1 = [[0, 3], [2, 4], [4, 6]]
        in_data_2 = [[0, -1], [2, 5], [4, 6]]
        out_expected = [[0, -1], [2, 4]]
        out_computed = oper.update(in_data_1, in_data_2)
        # NOTE(review): the assertion was disabled in the original suite —
        # kept disabled here; re-enable once the expected output is confirmed.
        # self._check('1st', out_expected, out_computed)

    def test_not(self):
        """NOT: negates every sample value, timestamps unchanged."""
        oper = NotOperation()
        in_data = [[5, 3], [5.3, 1], [5.75, 2], [6.5, 5], [6.75, 6], [9, 5], [9.25, 4], [10, 2]]
        out_expected = [[5, -3], [5.3, -1], [5.75, -2], [6.5, -5], [6.75, -6], [9, -5], [9.25, -4], [10, -2]]
        self._check('1st', out_expected, oper.update(in_data))
# Allow running this test module directly (python <module>.py) in
# addition to test-runner discovery.
if __name__ == '__main__':
    unittest.main()
| 47.213675
| 158
| 0.523063
| 3,586
| 27,620
| 3.843558
| 0.023703
| 0.150838
| 0.142204
| 0.223464
| 0.925488
| 0.9244
| 0.916927
| 0.89117
| 0.871871
| 0.87013
| 0
| 0.080655
| 0.323063
| 27,620
| 585
| 159
| 47.213675
| 0.656522
| 0.007133
| 0
| 0.790287
| 0
| 0
| 0.151751
| 0
| 0
| 0
| 0
| 0
| 0.136865
| 1
| 0.03532
| false
| 0
| 0.030905
| 0
| 0.068433
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8a9467a6d01fdc074b50a9502fdd49cb882243a7
| 68,574
|
py
|
Python
|
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/cmp_tonto/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/cmp_tonto/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/cmp_tonto/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.17631,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.341171,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.806988,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.678107,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.17424,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.673457,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.5258,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.546557,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.50583,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.152457,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0245819,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.249667,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.181798,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.402124,
'Execution Unit/Register Files/Runtime Dynamic': 0.20638,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.6516,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.66803,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.14676,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00323952,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00323952,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00281254,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00108381,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00261155,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0119031,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0313847,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.174767,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.498264,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.593588,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.30991,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0417119,
'L2/Runtime Dynamic': 0.00895503,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.65323,
'Load Store Unit/Data Cache/Runtime Dynamic': 3.09794,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.207576,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.207577,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 8.63744,
'Load Store Unit/Runtime Dynamic': 4.32921,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.511848,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.0237,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.181657,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.182043,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0823927,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.872543,
'Memory Management Unit/Runtime Dynamic': 0.264436,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.588,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.531889,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.041075,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.345677,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.918641,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 11.9779,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.09189,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.274863,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.419705,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.253039,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.408142,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.206016,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.867197,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.225057,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.04672,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0792913,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0106136,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.114263,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0784939,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.193555,
'Execution Unit/Register Files/Runtime Dynamic': 0.0891075,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.263699,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.653585,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.29013,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0013419,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0013419,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0012059,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00048712,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00112757,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00501726,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0115401,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0754582,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.79979,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212928,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.25629,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.25125,
'Instruction Fetch Unit/Runtime Dynamic': 0.561233,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0199704,
'L2/Runtime Dynamic': 0.00446198,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.99851,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.33528,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0893374,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0893374,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.42038,
'Load Store Unit/Runtime Dynamic': 1.86519,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.220291,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.440582,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0781819,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0783831,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.298433,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0351985,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.588845,
'Memory Management Unit/Runtime Dynamic': 0.113582,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.9166,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.208579,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0139548,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.125795,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.348329,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.18293,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0796071,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.265215,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.368265,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.242529,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.39119,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.197459,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.831177,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.220921,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.93944,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0695731,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0101727,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.105871,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0752336,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.175444,
'Execution Unit/Register Files/Runtime Dynamic': 0.0854064,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.242947,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.6168,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.20398,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00137678,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00137678,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0012417,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000503941,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00108074,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.005076,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0116811,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.072324,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.60043,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.201174,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.245645,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.04221,
'Instruction Fetch Unit/Runtime Dynamic': 0.5359,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0198483,
'L2/Runtime Dynamic': 0.00437042,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.92192,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.29772,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0868595,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0868595,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.33209,
'Load Store Unit/Runtime Dynamic': 1.81294,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.214181,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.428361,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0760135,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0762049,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.286038,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.033295,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.572724,
'Memory Management Unit/Runtime Dynamic': 0.1095,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.4958,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.183015,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0131695,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.121033,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.317218,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.9839,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0694583,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.257244,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.322554,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.23485,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.378805,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.191208,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.804862,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.219148,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.85021,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0609374,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00985067,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0993729,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0728517,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.16031,
'Execution Unit/Register Files/Runtime Dynamic': 0.0827024,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.226719,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.579138,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.12932,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00147251,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00147251,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00132897,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000539855,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00104652,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00532051,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0124598,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0700342,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.45478,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.194678,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.237868,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.8895,
'Instruction Fetch Unit/Runtime Dynamic': 0.52036,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0190747,
'L2/Runtime Dynamic': 0.00390663,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.78025,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.22766,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0822764,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0822764,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.16878,
'Load Store Unit/Runtime Dynamic': 1.71569,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.202879,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.405759,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0720026,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.072176,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.276982,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0322494,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.556779,
'Memory Management Unit/Runtime Dynamic': 0.104425,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.0738,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.160298,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0125466,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.117527,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.290371,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.76408,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.03692902720675173,
'Runtime Dynamic': 0.03692902720675173,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.0450672,
'Runtime Dynamic': 0.0273526,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 92.1192,
'Peak Power': 125.231,
'Runtime Dynamic': 26.9362,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 92.0742,
'Total Cores/Runtime Dynamic': 26.9088,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.0450672,
'Total L3s/Runtime Dynamic': 0.0273526,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.026258
| 124
| 0.68192
| 8,082
| 68,574
| 5.780005
| 0.067558
| 0.123646
| 0.113028
| 0.093505
| 0.940189
| 0.931584
| 0.918226
| 0.886822
| 0.862204
| 0.843066
| 0
| 0.131377
| 0.224444
| 68,574
| 914
| 125
| 75.026258
| 0.746987
| 0
| 0
| 0.642232
| 0
| 0
| 0.657747
| 0.048122
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8abe885e1e51d2656c9585d55593723662bdce96
| 11,373
|
py
|
Python
|
keystoneclient/tests/v3/test_roles.py
|
citrix-openstack-build/python-keystoneclient
|
e170955d6de5cbf521d54105bdefaf606ccdb356
|
[
"Apache-1.1"
] | null | null | null |
keystoneclient/tests/v3/test_roles.py
|
citrix-openstack-build/python-keystoneclient
|
e170955d6de5cbf521d54105bdefaf606ccdb356
|
[
"Apache-1.1"
] | null | null | null |
keystoneclient/tests/v3/test_roles.py
|
citrix-openstack-build/python-keystoneclient
|
e170955d6de5cbf521d54105bdefaf606ccdb356
|
[
"Apache-1.1"
] | 1
|
2019-03-08T07:21:48.000Z
|
2019-03-08T07:21:48.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import httpretty
from keystoneclient import exceptions
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import roles
class RoleTests(utils.TestCase, utils.CrudTests):
    """CRUD and grant/list/check/revoke tests for the v3 roles manager.

    Role assignments are exercised against both targets (``domains`` and
    ``projects``) and both actors (``users`` and ``groups``), plus the
    invalid combinations (domain+project, user+group) that the manager
    must reject with ``exceptions.ValidationError`` before any HTTP call
    is made.
    """

    def setUp(self):
        super(RoleTests, self).setUp()
        # Keys consumed by the shared CrudTests machinery.
        self.key = 'role'
        self.collection_key = 'roles'
        self.model = roles.Role
        self.manager = self.client.roles

    def new_ref(self, **kwargs):
        """Return a role reference dict, defaulting 'name' to random hex."""
        kwargs = super(RoleTests, self).new_ref(**kwargs)
        kwargs.setdefault('name', uuid.uuid4().hex)
        return kwargs

    @httpretty.activate
    def test_domain_role_grant(self):
        """PUT /domains/{d}/users/{u}/roles/{r} grants a role on a domain."""
        user_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.PUT,
                      ['domains', domain_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=201)

        self.manager.grant(role=ref['id'], domain=domain_id, user=user_id)

    @httpretty.activate
    def test_domain_group_role_grant(self):
        """PUT /domains/{d}/groups/{g}/roles/{r} grants a role to a group."""
        group_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.PUT,
                      ['domains', domain_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=201)

        self.manager.grant(role=ref['id'], domain=domain_id, group=group_id)

    @httpretty.activate
    def test_domain_role_list(self):
        """GET /domains/{d}/users/{u}/roles lists a user's domain roles."""
        user_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref_list = [self.new_ref(), self.new_ref()]

        self.stub_entity(httpretty.GET,
                         ['domains', domain_id, 'users', user_id,
                          self.collection_key], entity=ref_list)

        self.manager.list(domain=domain_id, user=user_id)

    @httpretty.activate
    def test_domain_group_role_list(self):
        """GET /domains/{d}/groups/{g}/roles lists a group's domain roles."""
        group_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref_list = [self.new_ref(), self.new_ref()]

        self.stub_entity(httpretty.GET,
                         ['domains', domain_id, 'groups', group_id,
                          self.collection_key], entity=ref_list)

        self.manager.list(domain=domain_id, group=group_id)

    @httpretty.activate
    def test_domain_role_check(self):
        """HEAD on the grant URL checks a user's role on a domain."""
        user_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.HEAD,
                      ['domains', domain_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.check(role=ref['id'], domain=domain_id,
                           user=user_id)

    @httpretty.activate
    def test_domain_group_role_check(self):
        """HEAD on the grant URL checks a group's role on a domain.

        NOTE(review): this test was previously disabled by a bare
        ``return`` as the first statement, which made everything below it
        unreachable and let the test "pass" without exercising anything.
        Skip explicitly instead so the disabled state shows up in test
        output.
        """
        # TODO(review): re-enable; the stub below mirrors the working
        # test_domain_role_check — confirm why it was short-circuited.
        self.skipTest('disabled: was short-circuited by a bare return')
        group_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.HEAD,
                      ['domains', domain_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.check(role=ref['id'], domain=domain_id, group=group_id)

    @httpretty.activate
    def test_domain_role_revoke(self):
        """DELETE on the grant URL revokes a user's role on a domain."""
        user_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.DELETE,
                      ['domains', domain_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.revoke(role=ref['id'], domain=domain_id, user=user_id)

    @httpretty.activate
    def test_domain_group_role_revoke(self):
        """DELETE on the grant URL revokes a group's role on a domain."""
        group_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.DELETE,
                      ['domains', domain_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.revoke(role=ref['id'], domain=domain_id, group=group_id)

    @httpretty.activate
    def test_project_role_grant(self):
        """PUT /projects/{p}/users/{u}/roles/{r} grants a project role."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.PUT,
                      ['projects', project_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=201)

        self.manager.grant(role=ref['id'], project=project_id, user=user_id)

    @httpretty.activate
    def test_project_group_role_grant(self):
        """PUT /projects/{p}/groups/{g}/roles/{r} grants a project role."""
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.PUT,
                      ['projects', project_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=201)

        self.manager.grant(role=ref['id'], project=project_id, group=group_id)

    @httpretty.activate
    def test_project_role_list(self):
        """GET /projects/{p}/users/{u}/roles lists a user's project roles."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref_list = [self.new_ref(), self.new_ref()]

        self.stub_entity(httpretty.GET,
                         ['projects', project_id, 'users', user_id,
                          self.collection_key], entity=ref_list)

        self.manager.list(project=project_id, user=user_id)

    @httpretty.activate
    def test_project_group_role_list(self):
        """GET /projects/{p}/groups/{g}/roles lists a group's project roles."""
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref_list = [self.new_ref(), self.new_ref()]

        self.stub_entity(httpretty.GET,
                         ['projects', project_id, 'groups', group_id,
                          self.collection_key], entity=ref_list)

        self.manager.list(project=project_id, group=group_id)

    @httpretty.activate
    def test_project_role_check(self):
        """HEAD on the grant URL checks a user's role on a project."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        # NOTE(review): stubbed as 200 here while the domain variant stubs
        # 204 — both are treated as success by the client; left as-is.
        self.stub_url(httpretty.HEAD,
                      ['projects', project_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=200)

        self.manager.check(role=ref['id'], project=project_id, user=user_id)

    @httpretty.activate
    def test_project_group_role_check(self):
        """HEAD on the grant URL checks a group's role on a project."""
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.HEAD,
                      ['projects', project_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=200)

        self.manager.check(role=ref['id'], project=project_id, group=group_id)

    @httpretty.activate
    def test_project_role_revoke(self):
        """DELETE on the grant URL revokes a user's role on a project."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.DELETE,
                      ['projects', project_id, 'users', user_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.revoke(role=ref['id'], project=project_id, user=user_id)

    @httpretty.activate
    def test_project_group_role_revoke(self):
        """DELETE on the grant URL revokes a group's role on a project."""
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.stub_url(httpretty.DELETE,
                      ['projects', project_id, 'groups', group_id,
                       self.collection_key, ref['id']],
                      status=204)

        self.manager.revoke(role=ref['id'], project=project_id, group=group_id)

    @httpretty.activate
    def test_domain_project_role_grant_fails(self):
        """Granting with both domain and project must raise ValidationError."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.grant,
            role=ref['id'],
            domain=domain_id,
            project=project_id,
            user=user_id)

    def test_domain_project_role_list_fails(self):
        """Listing with both domain and project must raise ValidationError."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.list,
            domain=domain_id,
            project=project_id,
            user=user_id)

    def test_domain_project_role_check_fails(self):
        """Checking with both domain and project must raise ValidationError."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.check,
            role=ref['id'],
            domain=domain_id,
            project=project_id,
            user=user_id)

    def test_domain_project_role_revoke_fails(self):
        """Revoking with both domain and project must raise ValidationError."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.revoke,
            role=ref['id'],
            domain=domain_id,
            project=project_id,
            user=user_id)

    def test_user_group_role_grant_fails(self):
        """Granting with both user and group must raise ValidationError."""
        user_id = uuid.uuid4().hex
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.grant,
            role=ref['id'],
            project=project_id,
            group=group_id,
            user=user_id)

    def test_user_group_role_list_fails(self):
        """Listing with both user and group must raise ValidationError."""
        user_id = uuid.uuid4().hex
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.list,
            project=project_id,
            group=group_id,
            user=user_id)

    def test_user_group_role_check_fails(self):
        """Checking with both user and group must raise ValidationError."""
        user_id = uuid.uuid4().hex
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.check,
            role=ref['id'],
            project=project_id,
            group=group_id,
            user=user_id)

    def test_user_group_role_revoke_fails(self):
        """Revoking with both user and group must raise ValidationError."""
        user_id = uuid.uuid4().hex
        group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        ref = self.new_ref()

        self.assertRaises(
            exceptions.ValidationError,
            self.manager.revoke,
            role=ref['id'],
            project=project_id,
            group=group_id,
            user=user_id)
| 32.21813
| 79
| 0.578651
| 1,362
| 11,373
| 4.604993
| 0.094714
| 0.081792
| 0.109056
| 0.125
| 0.86081
| 0.859694
| 0.854911
| 0.854911
| 0.847895
| 0.833068
| 0
| 0.013501
| 0.309681
| 11,373
| 352
| 80
| 32.309659
| 0.785378
| 0.052141
| 0
| 0.786517
| 0
| 0
| 0.026103
| 0
| 0
| 0
| 0
| 0
| 0.029963
| 1
| 0.097378
| false
| 0
| 0.018727
| 0
| 0.127341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a0d77b66fc1eafb1f5b6cdc79363b84340214fd
| 78,260
|
py
|
Python
|
autotest/autotest_services/tests/wcs/test_v20.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | null | null | null |
autotest/autotest_services/tests/wcs/test_v20.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | null | null | null |
autotest/autotest_services/tests/wcs/test_v20.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | null | null | null |
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from urllib import quote
from autotest_services import base as testbase
import base as wcsbase
#===============================================================================
# WCS 2.0 Get Capabilities
#===============================================================================
class WCS20GetCapabilitiesValidTestCase(testbase.XMLTestCase, testbase.SchematronTestMixIn):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities response"""
    # Schematron rules used by the mix-in to validate the CRS advertisements.
    schematron_locations = ["http://schemas.opengis.net/wcs/crs/",
                            "http://schemas.opengis.net/wcs/crs/1.0/wcsCrs.sch"]

    def getRequest(self):
        # Plain KVP-encoded GetCapabilities against WCS 2.0.1.
        return ("service=WCS&version=2.0.1&request=GetCapabilities", "kvp")
class WCS20GetCapabilitiesEmptyTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid but empty WCS 2.0 EO-AP (EO-WCS) GetCapabilities response (see #41)"""
    # Only the base fixtures are loaded, so no coverages/series are advertised.
    fixtures = testbase.BASE_FIXTURES

    def getRequest(self):
        return ("service=WCS&version=2.0.1&request=GetCapabilities", "kvp")
class WCSVersionNegotiationTestCase(testbase.XMLTestCase):
    """This test shall check version negotiation. A valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities response shall be returned"""
    def getRequest(self):
        # No version at all: the server must pick its highest supported one.
        return ("service=wcs&request=GetCapabilities", "kvp")
class WCSVersionNegotiationOldStyleTestCase(testbase.XMLTestCase):
    """This test shall check old style version negotiation. A valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities response shall be returned"""
    def getRequest(self):
        # Old-style negotiation: a too-high "version" parameter is offered.
        return ("service=wcs&version=3.0.0&request=GetCapabilities", "kvp")
class WCSVersionNegotiationNewStyleTestCase(testbase.XMLTestCase):
    """This test shall check new style version negotiation. A valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities response shall be returned"""
    def getRequest(self):
        # New-style negotiation via the "acceptversions" list.
        return ("service=wcs&acceptversions=2.0.1,1.1.0&request=GetCapabilities", "kvp")
class WCSVersionNegotiationFaultTestCase(testbase.ExceptionTestCase):
    """This test shall check new style version negotiation. A valid ows:ExceptionReport shall be returned"""
    def getRequest(self):
        # Only an unsupported version is offered, so negotiation must fail.
        return ("service=wcs&acceptversions=3.0.0&request=GetCapabilities", "kvp")

    def getExpectedExceptionCode(self):
        return "VersionNegotiationFailed"
class WCS20GetCapabilitiesSectionsAllTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response including all sections"""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§", mangling the
        # "sections" KVP parameter into "§ions".
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=All"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsAll2TestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response including all sections, requested by explicit enumeration."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=ServiceIdentification,ServiceProvider,OperationsMetadata,ServiceMetadata,Contents"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsAll3TestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response including all sections, using the EO-WCS summary section names."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=ServiceIdentification,ServiceProvider,OperationsMetadata,ServiceMetadata,CoverageSummary,DatasetSeriesSummary"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsServiceIdentificationTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response restricted to the ServiceIdentification section."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=ServiceIdentification"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsContentsTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response restricted to the Contents section."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=Contents"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsCoverageSummaryTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response restricted to the CoverageSummary section."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=CoverageSummary"
        return (params, "kvp")
class WCS20GetCapabilitiesSectionsDatasetSeriesSummaryTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities
    response restricted to the DatasetSeriesSummary section."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.1&request=GetCapabilities&sections=DatasetSeriesSummary"
        return (params, "kvp")
#===============================================================================
# WCS 2.0 DescribeCoverage
#===============================================================================
class WCS20DescribeCoverageDatasetTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeCoverage response for a wcseo:RectifiedDataset."""
    def getRequest(self):
        # DescribeCoverage for a single rectified dataset.
        return ("service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed", "kvp")
class WCS20DescribeCoverageMosaicTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeCoverage response for a wcseo:RectifiedStitchedMosaic."""
    def getRequest(self):
        # DescribeCoverage for a stitched-mosaic coverage.
        return ("service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB", "kvp")
class WCS20DescribeCoverageDatasetSeriesFaultTestCase(testbase.ExceptionTestCase):
    """This test shall try to retrieve a CoverageDescription for a non-coverage. It shall yield a valid ows:ExceptionReport"""
    def getRequest(self):
        # MER_FRS_1P_reduced is a dataset series, not a coverage.
        return ("service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=MER_FRS_1P_reduced", "kvp")

    def getExpectedExceptionCode(self):
        return "NoSuchCoverage"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeCoverageFaultTestCase(testbase.ExceptionTestCase):
    """This test shall try to retrieve a CoverageDescription for a coverage that does not exist. It shall yield a valid ows:ExceptionReport"""
    def getRequest(self):
        # "some_coverage" is not registered anywhere.
        return ("service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=some_coverage", "kvp")

    def getExpectedExceptionCode(self):
        return "NoSuchCoverage"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeCoverageMissingParameterFaultTestCase(testbase.ExceptionTestCase):
    """This test shall yield a valid ows:ExceptionReport for a missing parameter"""
    def getRequest(self):
        # The mandatory CoverageId parameter is deliberately omitted.
        return ("service=WCS&version=2.0.0&request=DescribeCoverage", "kvp")

    def getExpectedExceptionCode(self):
        return "MissingParameterValue"
#===============================================================================
# WCS 2.0 DescribeEOCoverageSet
#===============================================================================
class WCS20DescribeEOCoverageSetDatasetTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeEOCoverageSet response for a wcseo:RectifiedDataset"""
    def getRequest(self):
        # DescribeEOCoverageSet addressed at one rectified dataset.
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed", "kvp")
class WCS20DescribeEOCoverageSetMosaicTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeEOCoverageSet response for a wcseo:RectifiedStitchedMosaic"""
    def getRequest(self):
        # DescribeEOCoverageSet addressed at a stitched mosaic.
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=mosaic_MER_FRS_1P_reduced_RGB", "kvp")
class WCS20DescribeEOCoverageSetDatasetSeriesTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeEOCoverageSet response for a wcseo:RectifiedDatasetSeries."""
    def getRequest(self):
        # DescribeEOCoverageSet addressed at a whole dataset series.
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced", "kvp")
class WCS20DescribeEOCoverageSetFaultTestCase(testbase.ExceptionTestCase):
    """This test shall try to retrieve a CoverageDescription set for an wcseo-Object that does not exist. It shall yield a valid ows:ExceptionReport."""
    def getRequest(self):
        # "some_eo_object" matches neither a series nor a coverage.
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=some_eo_object", "kvp")

    def getExpectedExceptionCode(self):
        return "NoSuchDatasetSeriesOrCoverage"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeEOCoverageSetMissingParameterFaultTestCase(testbase.ExceptionTestCase):
    # The mandatory eoId parameter is omitted; an exception report is expected.
    def getRequest(self):
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet", "kvp")

    def getExpectedExceptionCode(self):
        return "MissingParameterValue"
class WCS20DescribeEOCoverageSetTwoSpatialSubsetsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Two spatial trims with the default containment behaviour.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=lat(32,47)&subset=long(11,33)", "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetTwoSpatialSubsetsOverlapsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Same trims as above, but containment=overlaps spelled out explicitly.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=lat(32,47)&subset=long(11,33)&containment=overlaps", "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetTwoSpatialSubsetsContainsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # containment=contains: only datasets fully inside the trims qualify.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=lat(32,47)&subset=long(11,33)&containment=contains", "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetTemporalSubsetTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Temporal trim on phenomenonTime with the default containment.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")', "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetTemporalSubsetOverlapsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Temporal trim with containment=overlaps spelled out explicitly.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")&containment=overlaps', "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetTemporalSubsetOverlapsIntervalBorderTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # The interval end coincides exactly with a dataset's begin time.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-16T09:09:29Z")&containment=overlaps', "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetTemporalSubsetContainsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Temporal trim with containment=contains.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")&containment=contains', "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetTemporalSubsetContainsIntervalBorderTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # containment=contains with the interval end exactly on a dataset's end time.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-16T09:12:46Z")&containment=contains', "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetSpatioTemporalSubsetTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Combined temporal and spatial trims, default containment.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")&subset=lat(32,47)&subset=long(11,33)', "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetSpatioTemporalSubsetOverlapsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Combined trims with containment=overlaps spelled out explicitly.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")&subset=lat(32,47)&subset=long(11,33)&containment=overlaps', "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
        ]
class WCS20DescribeEOCoverageSetSpatioTemporalSubsetContainsTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # Combined trims with containment=contains.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-08-22T09:22:00Z")&subset=lat(32,47)&subset=long(11,33)&containment=contains', "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetIncorrectTemporalSubsetFaultTestCase(testbase.ExceptionTestCase):
    # Timestamps without the required quotes must be rejected.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime(2006-08-01,2006-08-22)", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidSubsetting"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeEOCoverageSetInvalidTemporalSubsetFaultTestCase(testbase.ExceptionTestCase):
    # "2006-31-31" is not a valid calendar date.
    def getRequest(self):
        return ('service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=phenomenonTime("2006-08-01","2006-31-31")', "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidSubsetting"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeEOCoverageSetIncorrectSpatialSubsetFaultTestCase(testbase.ExceptionTestCase):
    # Non-numeric trim bounds must be rejected.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=lat(some_lat,some_other_lat)", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidSubsetting"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20DescribeEOCoverageSetInvalidSpatialSubsetFaultTestCase(testbase.ExceptionTestCase):
    # The trim interval is reversed (low bound greater than high bound).
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=lat(47,32)", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidSubsetting"

    def getExpectedHTTPStatus(self):
        return 404
# EOxServer allows and understands certain additional axis labels like "lat", or "long".
class WCS20DescribeEOCoverageSetInvalidAxisLabelFaultTestCase(testbase.ExceptionTestCase):
    # "x_axis" is not a recognized axis label for this coverage set.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&subset=x_axis(32,47)", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidAxisLabel"

    def getExpectedHTTPStatus(self):
        return 404
#===============================================================================
# WCS 2.0: Paging testcases
#===============================================================================
class WCS20DescribeEOCoverageSetDatasetPagingCountTestCase(testbase.WCS20DescribeEOCoverageSetPagingTestCase):
    # count=2 limits the page; the base class checks the result sizes below.
    def getRequest(self):
        return ("service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&count=2", "kvp")

    def getExpectedCoverageCount(self):
        return 1

    def getExpectedDatasetSeriesCount(self):
        return 1
#===============================================================================
# WCS 2.0: Section test cases
#===============================================================================
class WCS20DescribeEOCoverageSetSectionsAllTestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """DescribeEOCoverageSet with sections=All must return both the coverage
    and the dataset-series description sections."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/2.0}CoverageDescriptions",
            "{http://www.opengis.net/wcs/wcseo/1.0}DatasetSeriesDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=All"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsAll2TestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """Enumerating both section names must yield the same result as
    sections=All."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/2.0}CoverageDescriptions",
            "{http://www.opengis.net/wcs/wcseo/1.0}DatasetSeriesDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=CoverageDescriptions,DatasetSeriesDescriptions"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsAll3TestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """'All' combined with an explicit section name must still return both
    sections."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/2.0}CoverageDescriptions",
            "{http://www.opengis.net/wcs/wcseo/1.0}DatasetSeriesDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=All,DatasetSeriesDescriptions"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsAll4TestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """An explicit section name combined with 'All' must still return both
    sections (order of the list entries must not matter)."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/2.0}CoverageDescriptions",
            "{http://www.opengis.net/wcs/wcseo/1.0}DatasetSeriesDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=CoverageDescriptions,All"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsCoverageDescriptionsTestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """Requesting only CoverageDescriptions must suppress the dataset-series
    section."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/2.0}CoverageDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=CoverageDescriptions"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsDatasetSeriesDescriptionsTestCase(testbase.WCS20DescribeEOCoverageSetSectionsTestCase):
    """Requesting only DatasetSeriesDescriptions must suppress the coverage
    section."""
    def getExpectedSections(self):
        return [
            "{http://www.opengis.net/wcs/wcseo/1.0}DatasetSeriesDescriptions"
        ]

    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=WCS&version=2.0.0&request=DescribeEOCoverageSet&eoId=MER_FRS_1P_reduced&sections=DatasetSeriesDescriptions"
        return (params, "kvp")
class WCS20DescribeEOCoverageSetSectionsFaultTestCase(testbase.ExceptionTestCase):
    """An unknown section name must be rejected with InvalidParameterValue."""
    def getRequest(self):
        # Fixed mojibake: "&sect" had been decoded to "§" ("§ions").
        params = "service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced&sections=WrongSection"
        return (params, "kvp")

    def getExpectedHTTPStatus(self):
        return 400

    def getExpectedExceptionCode(self):
        return "InvalidParameterValue"
class WCS20DescribeEOCoverageSetDatasetUniqueTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # The same dataset is listed twice in EOID; it must be reported only once.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed,MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed", "kvp")

    def getExpectedCoverageIds(self):
        return ["MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed"]
class WCS20DescribeEOCoverageSetDatasetOutOfSubsetTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # The trim window misses every EO object, so no coverages are expected.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced,mosaic_MER_FRS_1P_reduced_RGB,MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed&subset=lat(0,1)&subset=long(0,1)", "kvp")

    def getExpectedCoverageIds(self):
        return []
class WCS20DescribeEOCoverageSetDatasetSeriesStitchedMosaicTestCase(testbase.WCS20DescribeEOCoverageSetSubsettingTestCase):
    # A dataset series plus a stitched mosaic: the union of their contents
    # (and the mosaic itself) must be described.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=DescribeEOCoverageSet&EOID=MER_FRS_1P_reduced,mosaic_MER_FRS_1P_reduced_RGB", "kvp")

    def getExpectedCoverageIds(self):
        return [
            "MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
            "MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_uint16_reduced_compressed",
            "mosaic_MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_RGB_reduced",
            "mosaic_MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_RGB_reduced",
            "mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced",
            "mosaic_MER_FRS_1P_reduced_RGB",
        ]
#===============================================================================
# WCS 2.0: Exceptions
#===============================================================================
# after WCS 2.0.1 implementation does not lead to an error anymore
#class WCS20GetCoverageFormatMissingFaultTestCase(testbase.ExceptionTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB"
# return (params, "kvp")
#
# def getExpectedExceptionCode(self):
# return "MissingParameterValue"
class WCS20GetCoverageNoSuchCoverageFaultTestCase(testbase.ExceptionTestCase):
    # GetCoverage for an unknown coverage id must raise NoSuchCoverage.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=INVALID", "kvp")

    def getExpectedExceptionCode(self):
        return "NoSuchCoverage"

    def getExpectedHTTPStatus(self):
        return 404
class WCS20GetCoverageFormatUnsupportedFaultTestCase(testbase.ExceptionTestCase):
    # image/jpeg is a known media type but not an offered coverage format.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/jpeg", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidParameterValue"
class WCS20GetCoverageFormatUnknownFaultTestCase(testbase.ExceptionTestCase):
    # "unknown" is not a media type at all.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=unknown", "kvp")

    def getExpectedExceptionCode(self):
        return "InvalidParameterValue"
#===============================================================================
# WCS 2.0: Simple requests
#===============================================================================
class WCS20GetCoverageMosaicTestCase(testbase.RectifiedGridCoverageTestCase):
    # Full GetCoverage of the stitched mosaic as GeoTIFF.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff", "kvp")
class WCS20GetCoverageDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    # Full GetCoverage of a single dataset as GeoTIFF.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff", "kvp")
#==============================================================================
# WCS 2.0: Formats
#==============================================================================
# WCS 2.0.1 introduced the native format, i.e., default format in case of missing format specification
class WCS20GetCoverageNativeTestCase(testbase.RectifiedGridCoverageTestCase):
    # No format parameter: the WCS 2.0.1 native format (here GeoTIFF) applies.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB", "kvp")

    def getFileExtension(self, part=None):
        return "tif"
class WCS20GetCoverageJPEG2000TestCase(testbase.RectifiedGridCoverageTestCase):
    # JPEG 2000 output encoding.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/jp2", "kvp")

    def getFileExtension(self, part=None):
        return "jp2"
class WCS20GetCoverageNetCDFTestCase(testbase.RectifiedGridCoverageTestCase):
    # netCDF output encoding.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=application/x-netcdf", "kvp")

    def getFileExtension(self, part=None):
        return "nc"
class WCS20GetCoverageHDFTestCase(testbase.RectifiedGridCoverageTestCase):
    # HDF output encoding.
    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=application/x-hdf", "kvp")

    def getFileExtension(self, part=None):
        return "hdf"
# TODO: Enable test once subdatasets are supported (see #123):
#class WCS20GetCoverageNetCDFInputTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_netCDF&format=image/tiff"
# return (params, "kvp")
class WCS20GetCoverageJPEG2000InputTestCase(testbase.RectifiedGridCoverageTestCase):
    # Source data stored as JPEG 2000, delivered as GeoTIFF.
    fixtures = testbase.RectifiedGridCoverageTestCase.fixtures + ["meris_coverages_jpeg2000.json"]

    def getRequest(self):
        return ("service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_RGB_reduced_JPEG2000&format=image/tiff", "kvp")
#===============================================================================
# WCS 2.0: Multipart requests
#===============================================================================
class WCS20GetCoverageMultipartMosaicTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # multipart/related delivery of the full mosaic.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&mediatype=multipart/related", "kvp")
class WCS20GetCoverageMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # multipart/related delivery of a single dataset.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related", "kvp")
# TODO: wrong multipart parameters only result in non-multipart images. Uncomment, when implemented
#class WCS20GetCoverageWrongMultipartParameterFaultTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.ExceptionTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&mediatype=multipart/something"
# return (params, "kvp")
#
# def getExpectedExceptionCode(self):
# return "InvalidParameterValue"
#===============================================================================
# WCS 2.0: Subset requests
#===============================================================================
class WCS20GetCoverageSubsetDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RectifiedGridCoverageTestCase):
    # Pixel-coordinate trim on both image axes.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=x(100,200)&subset=y(200,300)", "kvp")
class WCS20GetCoverageMultipartSubsetMosaicTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # multipart/related delivery of a pixel-trimmed mosaic.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&mediatype=multipart/related&subset=x(100,1000)&subset=y(0,99)", "kvp")
class WCS20GetCoverageMultipartSubsetDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # multipart/related delivery of a pixel-trimmed dataset.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&subset=x(100,200)&subset=y(200,300)", "kvp")
class WCS20GetCoverageSubsetEPSG4326DatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    # Geographic (EPSG:4326) trim on a single dataset.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=lat(38,40)&subset=long(20,22)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326", "kvp")
class WCS20GetCoverageSubsetEPSG4326MosaicTestCase(testbase.RectifiedGridCoverageTestCase):
    # Geographic (EPSG:4326) trim on the mosaic.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&subset=lat(38,40)&subset=long(0,30)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326", "kvp")
class WCS20GetCoverageSubsetInvalidEPSGFaultTestCase(testbase.ExceptionTestCase):
    # EPSG:99999 does not exist, so the subsetting CRS must be rejected.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&subset=x(38,40)&subset=y(20,22)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/99999", "kvp")

    def getExpectedHTTPStatus(self):
        return 404

    def getExpectedExceptionCode(self):
        return "SubsettingCrs-NotSupported"
#===============================================================================
# WCS 2.0: OutputCRS
#===============================================================================
class WCS20GetCoverageOutputCRSDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # Reprojection to EPSG:3035 via the outputcrs parameter.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035", "kvp")
class WCS20GetCoverageOutputCRSotherUoMDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # Reprojection to EPSG:3857, a CRS with a different unit of measure.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3857", "kvp")
class WCS20GetCoverageOutputCrsEPSGFaultTestCase(testbase.ExceptionTestCase):
    # EPSG:99999 does not exist, so the output CRS must be rejected.
    def getRequest(self):
        return ("service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&outputcrs=http://www.opengis.net/def/crs/EPSG/0/99999", "kvp")

    def getExpectedHTTPStatus(self):
        return 404

    def getExpectedExceptionCode(self):
        return "OutputCrs-NotSupported"
#===============================================================================
# WCS 2.0: Size
#===============================================================================
class WCS20GetCoverageSizeDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage on a dataset scaled to a fixed 200x200 output size."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&scalesize=x(200),y(200)",
            "kvp",
        )
class WCS20GetCoverageSizeMosaicTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage on a mosaic scaled to a fixed 200x400 output size."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&scalesize=x(200),y(400)",
            "kvp",
        )
class WCS20GetCoverageSubsetSizeDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage combining a pixel subset with scaling to 20x20."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=x(100,200)&subset=y(200,300)&scalesize=x(20),y(20)",
            "kvp",
        )
class WCS20GetCoverageSubsetEPSG4326SizeDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage with an EPSG:4326 subset plus scaling on lat/long axes."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&subset=lat(38,40)&subset=long(20,22)&scalesize=lat(20),long(20)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )
class WCS20GetCoverageSubsetEPSG4326SizeExceedsExtentDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage where the EPSG:4326 subset exceeds the coverage extent."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=lat(10,50)&subset=long(0,50)&scalesize=lat(100),long(100)&mediatype=multipart/related&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )
class WCS20GetCoverageInvalidSizeFaultTestCase(testbase.ExceptionTestCase):
    """A non-integer scalesize value must raise an InvalidScaleFactor fault."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&scalesize=x(1.11)",
            "kvp",
        )

    def getExpectedHTTPStatus(self):
        return 404

    def getExpectedExceptionCode(self):
        return "InvalidScaleFactor"
#===============================================================================
# WCS 2.0: Resolution
#===============================================================================
# TODO: not supported anymore (WCS 2.0 Scaling Extension)
#class WCS20GetCoverageResolutionDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&resolution=x(0.1)&resolution=y(0.1)"
# return (params, "kvp")
#
#class WCS20GetCoverageResolutionMosaicTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=mosaic_MER_FRS_1P_reduced_RGB&format=image/tiff&resolution=x(0.1)&resolution=y(0.1)"
# return (params, "kvp")
#
#class WCS20GetCoverageSubsetResolutionDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=x(100,200)&subset=y(200,300)&resolution=x(0.1)&resolution=y(0.1)"
# return (params, "kvp")
#
#class WCS20GetCoverageSubsetEPSG4326ResolutionLatLonDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=lat(38,40)&subset=long(20,22)&resolution=lat(0.01)&resolution=long(0.01)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326"
# return (params, "kvp")
#
#class WCS20GetCoverageSubsetEPSG4326ResolutionInvalidAxisDatasetFaultTestCase(testbase.ExceptionTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=lat(38,40)&subset=long(20,22)&resolution=x(0.01)&resolution=y(0.01)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326"
# return (params, "kvp")
#
# def getExpectedExceptionCode(self):
# return "InvalidParameterValue"
#===============================================================================
# WCS 2.0: Rangesubset
#===============================================================================
class WCS20GetCoverageRangeSubsetIntervalDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """Range subsetting with a band interval (bands 01 through 03)."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&rangesubset=MERIS_radiance_01_uint16:MERIS_radiance_03_uint16",
            "kvp",
        )
class WCS20GetCoverageRangeSubsetNamesDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """Range subsetting with an explicit list of band names (04, 05, 06)."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&rangesubset=MERIS_radiance_04_uint16,MERIS_radiance_05_uint16,MERIS_radiance_06_uint16",
            "kvp",
        )
class WCS20GetCoverageRangeSubsetNamesPNGDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """Single-band range subset delivered as PNG instead of GeoTIFF."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/png&rangesubset=MERIS_radiance_01_uint16",
            "kvp",
        )

    def getFileExtension(self, part=None):
        # stored expected responses use a .png extension for this case
        return "png"
class WCS20GetCoverageRangeSubsetItemIntervalDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """Range subsetting mixing a single band name with a band interval, as PNG."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/png&rangesubset=MERIS_radiance_01_uint16,MERIS_radiance_03_uint16:MERIS_radiance_04_uint16",
            "kvp",
        )

    def getFileExtension(self, part=None):
        # stored expected responses use a .png extension for this case
        return "png"
class WCS20GetCoverageMultipartRangeSubsetNamesDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage combined with a named-band range subset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mediatype=multipart/related&rangesubset=MERIS_radiance_04_uint16,MERIS_radiance_05_uint16,MERIS_radiance_06_uint16",
            "kvp",
        )
# TODO: not supported anymore (WCS 2.0 Scaling Extension)
#class WCS20GetCoverageSubsetSizeResolutionOutputCRSRangeSubsetIntervalDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&subset=x(100,200)&subset=y(200,300)&size=y(100)&resolution=x(0.1)&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035&rangesubset=MERIS_radiance_01_uint16:MERIS_radiance_03_uint16&mediatype=multipart/related"
# return (params, "kvp")
#===============================================================================
# WCS 2.0: Polygon Mask
#===============================================================================
# TODO: Enable these tests once the feature is implemented in MapServer
#class WCS20GetCoveragePolygonMaskTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mask=polygon(14.124422306243844,42.806626286621963,21.208516509273601,43.730638573973678,21.208516509273601,43.730638573973678,21.892970055460054,37.8443380767702,15.04843459359555,36.646544370943914,12.379065763468395,39.555471942236323,14.124422306243844,42.806626286621963)"
# return (params, "kvp")
#class WCS20GetCoveragePolygonMaskProjectedTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self): # TODO: swap axes
# params = "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&mask=polygon,http://www.opengis.net/def/crs/EPSG/0/4326(42.806626286621963,14.124422306243844,43.730638573973678,21.208516509273601,43.730638573973678,21.208516509273601,37.8443380767702,21.892970055460054,36.646544370943914,15.04843459359555,39.555471942236323,12.379065763468395,42.806626286621963,14.124422306243844)"
# return (params, "kvp")
#class WCS20PostGetCoveragePolygonMaskTestCase(testbase.RectifiedGridCoverageTestCase):
# def getRequest(self):
# params = """<wcs:GetCoverage service="WCS" version="2.0.0"
# xmlns:wcs="http://www.opengis.net/wcs/2.0"
# xmlns:wcsmask="http://www.opengis.net/wcs/mask/1.0">
# <wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
# <wcs:format>image/tiff</wcs:format>
# <wcs:Extension>
# <wcsmask:polygonMask>14.124422306243844 42.806626286621963 21.208516509273601 43.730638573973678 21.208516509273601 43.730638573973678 21.892970055460054 37.8443380767702 15.04843459359555 36.646544370943914 12.379065763468395 39.555471942236323 14.124422306243844 42.806626286621963</wcsmask:polygonMask>
# </wcs:Extension>
# </wcs:GetCoverage>"""
# return (params, "xml")
#===============================================================================
# WCS 2.0: Interpolation
#===============================================================================
class WCS20GetCoverageDatasetInterpolationNearestTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage requesting nearest-neighbour interpolation."""

    def getRequest(self):
        # BUG FIX: the original string had "...subset=y(200,250)outputcrs=..."
        # without a '&' separator, making the y-subset KVP value malformed.
        params = (
            "service=wcs&version=2.0.1&request=GetCoverage"
            "&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced"
            "&format=image/tiff&subset=x(200,250)&subset=y(200,250)"
            "&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035"
            "&interpolation=http://www.opengis.net/def/interpolation/OGC/1/nearest-neighbour"
        )
        return (params, "kvp")
class WCS20GetCoverageDatasetInterpolationAverageTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage requesting average interpolation."""

    def getRequest(self):
        # BUG FIX: the original string had "...subset=y(200,250)outputcrs=..."
        # without a '&' separator, making the y-subset KVP value malformed.
        params = (
            "service=wcs&version=2.0.1&request=GetCoverage"
            "&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced"
            "&format=image/tiff&subset=x(200,250)&subset=y(200,250)"
            "&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035"
            "&interpolation=http://www.opengis.net/def/interpolation/OGC/1/average"
        )
        return (params, "kvp")
class WCS20GetCoverageDatasetInterpolationBilinearTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage requesting bilinear interpolation."""

    def getRequest(self):
        # BUG FIX: the original string had "...subset=y(200,250)outputcrs=..."
        # without a '&' separator, making the y-subset KVP value malformed.
        params = (
            "service=wcs&version=2.0.1&request=GetCoverage"
            "&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced"
            "&format=image/tiff&subset=x(200,250)&subset=y(200,250)"
            "&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035"
            "&interpolation=http://www.opengis.net/def/interpolation/OGC/1/bilinear"
        )
        return (params, "kvp")
class WCS20GetCoverageInvalidInterpolationFaultTestCase(testbase.ExceptionTestCase):
    """An unknown interpolation method URI must raise a fault."""

    def getRequest(self):
        # BUG FIX: the original string had "...subset=y(200,250)outputcrs=..."
        # without a '&' separator, making the y-subset KVP value malformed.
        params = (
            "service=wcs&version=2.0.0&request=GetCoverage"
            "&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced"
            "&format=image/tiff&subset=x(200,250)&subset=y(200,250)"
            "&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035"
            "&interpolation=http://www.opengis.net/def/interpolation/OGC/1/invalid"
        )
        return (params, "kvp")

    def getExpectedExceptionCode(self):
        return "InterpolationMethodNotSupported"

    def getExpectedHTTPStatus(self):
        return 404
#===============================================================================
# WCS 2.0 Rasdaman test cases
#===============================================================================
class WCS20GetCoverageRasdamanMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage against the rasdaman-backed dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related",
            "kvp",
        )
class WCS20GetCoverageRasdamanMultipartDatasetSubsetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage with a pixel subset on the rasdaman dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&subset=x(100,200)&subset=y(200,300)",
            "kvp",
        )
class WCS20GetCoverageRasdamanMultipartDatasetSizeTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage scaled to 100x100 on the rasdaman dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&scalesize=x(100),y(100)",
            "kvp",
        )
# TODO: not supported anymore (WCS 2.0 Scaling Extension)
#class WCS20GetCoverageRasdamanMultipartDatasetResolutionTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&resolution=x(0.1)&resolution=y(0.1)"
# return (params, "kvp")
class WCS20GetCoverageRasdamanMultipartDatasetOutputCRSTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage reprojected to EPSG:3035 on the rasdaman dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035",
            "kvp",
        )
class WCS20GetCoverageRasdamanMultipartDatasetSubsetSizeTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage combining subset and scaling on the rasdaman dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&subset=x(100,200)&subset=y(200,300)&scalesize=x(20),y(20)",
            "kvp",
        )
# TODO: not supported anymore (WCS 2.0 Scaling Extension)
#class WCS20GetCoverageRasdamanMultipartDatasetSubsetResolutionTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
# def getRequest(self):
# params = "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&subset=x(100,200)&subset=y(200,300)&resolution=x(0.1)&resolution=y(0.1)"
# return (params, "kvp")
class WCS20GetCoverageRasdamanMultipartDatasetRangeSubsetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage selecting band index 1 on the rasdaman dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&mediatype=multipart/related&rangesubset=1",
            "kvp",
        )
class WCS20GetCoverageRasdamanSubsetSizeResolutionOutputCRSRangeSubsetIndicesDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.RasdamanTestCaseMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    # Combined request: subset + size + resolution + outputcrs + rangesubset
    # on the rasdaman dataset.
    # NOTE(review): this request still uses "size=" and "resolution=" KVP
    # parameters, which TODO comments elsewhere in this file mark as no longer
    # supported (WCS 2.0 Scaling Extension uses "scalesize=") — confirm whether
    # this case is intentionally kept as-is.
    def getRequest(self):
        params = "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=mosaic_MER_FRS_1PNPDE20060830_100949_000001972050_00423_23523_0079_RGB_reduced_rasdaman&format=image/tiff&subset=x(100,200)&subset=y(200,300)&size=y(100)&resolution=x(0.1)&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3035&rangesubset=1,2,3&mediatype=multipart/related"
        return (params, "kvp")
#===============================================================================
# WCS 2.0: GetCov with EPSG:3035 input images
#===============================================================================
class WCS20DescribeCoverageReprojectedDatasetTestCase(testbase.XMLTestCase):
    """DescribeCoverage for the EPSG:3035 (reprojected) input dataset."""

    def getRequest(self):
        return (
            "service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected",
            "kvp",
        )
class WCS20GetCoverageReprojectedDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage for the reprojected input dataset, no further options."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected&format=image/tiff",
            "kvp",
        )
class WCS20GetCoverageReprojectedSubsetDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """GetCoverage with a pixel subset on the reprojected input dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected&format=image/tiff&subset=x(100,200)&subset=y(200,300)",
            "kvp",
        )
class WCS20GetCoverageReprojectedSubsetEPSG4326DatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage with an EPSG:4326 subset on the reprojected dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected&format=image/tiff&mediatype=multipart/related&subset=lat(38,40)&subset=long(20,22)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )
class WCS20GetCoverageReprojectedMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Plain multipart GetCoverage on the reprojected input dataset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected&format=image/tiff&mediatype=multipart/related",
            "kvp",
        )
class WCS20GetCoverageReprojectedEPSG3857DatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage reprojecting the EPSG:3035 dataset to EPSG:3857."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed_reprojected&format=image/tiff&mediatype=multipart/related&outputcrs=http://www.opengis.net/def/crs/EPSG/0/3857",
            "kvp",
        )
#===============================================================================
# WCS 2.0 Referenceable Grid Coverages
#===============================================================================
class WCS20DescribeCoverageReferenceableDatasetTestCase(testbase.XMLTestCase):
    """Retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeCoverage response
    for a wcseo:ReferenceableDataset.
    """

    def getRequest(self):
        return (
            "service=WCS&version=2.0.0&request=DescribeCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775",
            "kvp",
        )
class WCS20GetCoverageReferenceableDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageReferenceableGridCoverageMultipartTestCase):
    """Multipart GetCoverage on a referenceable grid coverage, no subset."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775&format=image/tiff&mediatype=multipart/related",
            "kvp",
        )
class WCS20GetCoverageReferenceableDatasetImageCRSSubsetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageReferenceableGridCoverageMultipartTestCase):
    """Multipart GetCoverage with an image-CRS (pixel) subset on a referenceable coverage."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775&format=image/tiff&mediatype=multipart/related&subset=x(0,99)&subset=y(0,99)",
            "kvp",
        )
class WCS20GetCoverageReferenceableDatasetGeogCRSSubsetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageReferenceableGridCoverageMultipartTestCase):
    """Multipart GetCoverage with a geographic (EPSG:4326) subset on a referenceable coverage."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775&format=image/tiff&mediatype=multipart/mixed&subset=x(18.0,20.0)&subset=y(-34.5,-33.5)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )
class WCS20GetCoverageReferenceableDatasetGeogCRSSubsetExceedsExtentTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageReferenceableGridCoverageMultipartTestCase):
    """Geographic subset partially outside the coverage extent still succeeds."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775&format=image/tiff&mediatype=multipart/mixed&subset=x(18,23)&subset=y(-35,-33)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )
class WCS20GetCoverageReferenceableDatasetGeogCRSSubsetOutsideExtentTestCase(testbase.ExceptionTestCase):
    """A geographic subset entirely outside the coverage extent must fault."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775&format=image/tiff&mediatype=multipart/mixed&subset=x(14.5,16.5)&subset=y(-34.5,-33.5)&subsettingcrs=http://www.opengis.net/def/crs/EPSG/0/4326",
            "kvp",
        )

    def getExpectedHTTPStatus(self):
        return 400

    def getExpectedExceptionCode(self):
        return "InvalidParameterValue"
#===============================================================================
# WCS 2.0.1 Corrigendum test cases
#===============================================================================
class WCS20CorrigendumGetCapabilitiesEmptyTestCase(testbase.XMLTestCase):
    """Retrieve a valid but empty WCS 2.0.1 EO-AP (EO-WCS) GetCapabilities
    response (see #162).
    """

    # use the base fixtures only, so no coverages are registered
    fixtures = testbase.BASE_FIXTURES

    def getRequest(self):
        return ("service=WCS&version=2.0.1&request=GetCapabilities", "kvp")
class WCS20CorrigendumDescribeCoverageDatasetTestCase(testbase.XMLTestCase):
    """Retrieve a valid WCS 2.0.1 EO-AP (EO-WCS) DescribeCoverage response
    for a wcseo:RectifiedDataset (see #162).
    """

    def getRequest(self):
        return (
            "service=WCS&version=2.0.1&request=DescribeCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
            "kvp",
        )
class WCS20CorrigendumDescribeEOCoverageSetMosaicTestCase(testbase.XMLTestCase):
    """Retrieve a valid WCS 2.0.1 EO-AP (EO-WCS) DescribeEOCoverageSet
    response for a wcseo:RectifiedStitchedMosaic (see #162).
    """

    def getRequest(self):
        return (
            "service=WCS&version=2.0.1&request=DescribeEOCoverageSet&eoId=mosaic_MER_FRS_1P_reduced_RGB",
            "kvp",
        )
class WCS20CorrigendumGetCoverageDatasetTestCase(testbase.RectifiedGridCoverageTestCase):
    """WCS 2.0.1 GetCoverage without an explicit format parameter (see #162)."""

    def getRequest(self):
        return (
            "service=wcs&version=2.0.1&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed",
            "kvp",
        )
#===============================================================================
# WCS 2.0 - POST
#===============================================================================
class WCS20PostGetCapabilitiesValidTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) GetCapabilities response
    via POST.
    """
    def getRequest(self):
        # XML-encoded GetCapabilities request sent via HTTP POST
        params = """<ns:GetCapabilities updateSequence="u2001" service="WCS"
          xmlns:ns="http://www.opengis.net/wcs/2.0"
          xmlns:ns1="http://www.opengis.net/ows/2.0">
          <ns1:AcceptVersions><ns1:Version>2.0.0</ns1:Version></ns1:AcceptVersions>
        </ns:GetCapabilities>
        """
        return (params, "xml")
class WCS20PostDescribeCoverageDatasetTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeCoverage response
    for a wcseo:RectifiedDataset via POST.
    """
    def getRequest(self):
        # XML-encoded DescribeCoverage request sent via HTTP POST
        params = """<ns:DescribeCoverage
           xmlns:ns="http://www.opengis.net/wcs/2.0" service="WCS" version="2.0.0">
         <ns:CoverageId>MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_uint16_reduced_compressed</ns:CoverageId>
        </ns:DescribeCoverage>"""
        return (params, "xml")
class WCS20PostDescribeEOCoverageSetDatasetSeriesTestCase(testbase.XMLTestCase):
    """This test shall retrieve a valid WCS 2.0 EO-AP (EO-WCS) DescribeEOCoverageSet response
    for a wcseo:RectifiedDatasetSeries via POST.
    """
    def getRequest(self):
        # XML-encoded DescribeEOCoverageSet with OVERLAPS containment and
        # Long/Lat dimension trims, sent via HTTP POST
        params = """<wcseo:DescribeEOCoverageSet service="WCS" version="2.0.0" count="100"
           xmlns:wcseo="http://www.opengis.net/wcs/wcseo/1.0"
           xmlns:wcs="http://www.opengis.net/wcs/2.0">
          <wcseo:eoId>MER_FRS_1P_reduced</wcseo:eoId>
          <wcseo:containment>OVERLAPS</wcseo:containment>
          <wcseo:Sections>
            <wcseo:Section>All</wcseo:Section>
          </wcseo:Sections>
          <wcs:DimensionTrim>
            <wcs:Dimension>Long</wcs:Dimension>
            <wcs:TrimLow>16</wcs:TrimLow>
            <wcs:TrimHigh>18</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:DimensionTrim>
            <wcs:Dimension>Lat</wcs:Dimension>
            <wcs:TrimLow>46</wcs:TrimLow>
            <wcs:TrimHigh>48</wcs:TrimHigh>
          </wcs:DimensionTrim>
        </wcseo:DescribeEOCoverageSet>"""
        return (params, "xml")
class WCS20PostGetCoverageMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage requested via an XML-encoded POST body."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0">
          <wcs:CoverageId>mosaic_MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_RGB_reduced</wcs:CoverageId>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageSubsetMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST with x/y DimensionTrim subsets (0..99)."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0">
          <wcs:CoverageId>mosaic_MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_RGB_reduced</wcs:CoverageId>
          <wcs:DimensionTrim>
            <wcs:Dimension>x</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>99</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:DimensionTrim>
            <wcs:Dimension>y</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>99</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageSubsetEPSG4326MultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST with Long/Lat trims and an EPSG:4326
    subsettingCrs supplied through the CRS extension element."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0"
           xmlns:crs="http://www.opengis.net/wcs/crs/1.0">
          <wcs:Extension>
            <crs:subsettingCrs>http://www.opengis.net/def/crs/EPSG/0/4326</crs:subsettingCrs>
          </wcs:Extension>
          <wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
          <wcs:DimensionTrim>
            <wcs:Dimension>Long</wcs:Dimension>
            <wcs:TrimLow>20</wcs:TrimLow>
            <wcs:TrimHigh>22</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:DimensionTrim>
            <wcs:Dimension>Lat</wcs:Dimension>
            <wcs:TrimLow>38</wcs:TrimLow>
            <wcs:TrimHigh>40</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageReferenceableMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageReferenceableGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST on a referenceable grid coverage
    with x/y DimensionTrim subsets (0..100)."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0">
          <wcs:CoverageId>ASA_WSM_1PNDPA20050331_075939_000000552036_00035_16121_0775</wcs:CoverageId>
          <wcs:DimensionTrim>
            <wcs:Dimension>x</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>100</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:DimensionTrim>
            <wcs:Dimension>y</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>100</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageRangeSubsetMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST using the range-subsetting extension:
    one single band (04) plus a band interval (05..07)."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0"
           xmlns:rsub="http://www.opengis.net/wcs/range-subsetting/1.0">
          <wcs:Extension>
            <rsub:RangeSubset>
              <rsub:RangeItem>
                <rsub:RangeComponent>MERIS_radiance_04_uint16</rsub:RangeComponent>
              </rsub:RangeItem>
              <rsub:RangeItem>
                <rsub:RangeInterval>
                  <rsub:startComponent>MERIS_radiance_05_uint16</rsub:startComponent>
                  <rsub:endComponent>MERIS_radiance_07_uint16</rsub:endComponent>
                </rsub:RangeInterval>
              </rsub:RangeItem>
            </rsub:RangeSubset>
          </wcs:Extension>
          <wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
          <wcs:DimensionTrim>
            <wcs:Dimension>x</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>99</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:DimensionTrim>
            <wcs:Dimension>y</wcs:Dimension>
            <wcs:TrimLow>0</wcs:TrimLow>
            <wcs:TrimHigh>99</wcs:TrimHigh>
          </wcs:DimensionTrim>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageScaleSizeMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST using the scaling extension's
    ScaleToSize (50x50 target size)."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0"
           xmlns:scal="http://www.opengis.net/wcs/scaling/1.0">
          <wcs:Extension>
            <scal:ScaleToSize>
              <scal:TargetAxisSize>
                <scal:axis>x</scal:axis>
                <scal:targetSize>50</scal:targetSize>
              </scal:TargetAxisSize>
              <scal:TargetAxisSize>
                <scal:axis>y</scal:axis>
                <scal:targetSize>50</scal:targetSize>
              </scal:TargetAxisSize>
            </scal:ScaleToSize>
          </wcs:Extension>
          <wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageScaleExtentMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """Multipart GetCoverage via POST using the scaling extension's
    ScaleToExtent (50..100 on both axes)."""
    def getRequest(self):
        params = """<wcs:GetCoverage service="WCS" version="2.0.1"
           xmlns:wcs="http://www.opengis.net/wcs/2.0"
           xmlns:scal="http://www.opengis.net/wcs/scaling/1.0">
          <wcs:Extension>
            <scal:ScaleToExtent>
              <scal:TargetAxisExtent>
                <scal:axis>x</scal:axis>
                <scal:low>50</scal:low>
                <scal:high>100</scal:high>
              </scal:TargetAxisExtent>
              <scal:TargetAxisExtent>
                <scal:axis>y</scal:axis>
                <scal:low>50</scal:low>
                <scal:high>100</scal:high>
              </scal:TargetAxisExtent>
            </scal:ScaleToExtent>
          </wcs:Extension>
          <wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
          <wcs:format>image/tiff</wcs:format>
          <wcs:mediaType>multipart/related</wcs:mediaType>
        </wcs:GetCoverage>"""
        return (params, "xml")
class WCS20PostGetCoverageInterpolationMultipartDatasetTestCase(wcsbase.WCS20GetCoverageMixIn, testbase.WCS20GetCoverageRectifiedGridCoverageMultipartTestCase):
    """POST GetCoverage with a global bilinear interpolation, EPSG:4326 subsetting CRS and a Lat/Long trim."""

    def getRequest(self):
        """Return the XML request body and its encoding tag ("xml")."""
        body = """<wcs:GetCoverage service="WCS" version="2.0.1"
xmlns:wcs="http://www.opengis.net/wcs/2.0"
xmlns:int="http://www.opengis.net/wcs/interpolation/1.0"
xmlns:crs="http://www.opengis.net/wcs/crs/1.0">
<wcs:Extension>
<int:Interpolation>
<int:globalInterpolation>http://www.opengis.net/def/interpolation/OGC/1/bilinear</int:globalInterpolation>
</int:Interpolation>
<crs:subsettingCrs>http://www.opengis.net/def/crs/EPSG/0/4326</crs:subsettingCrs>
</wcs:Extension>
<wcs:CoverageId>MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed</wcs:CoverageId>
<wcs:DimensionTrim>
<wcs:Dimension>Long</wcs:Dimension>
<wcs:TrimLow>20</wcs:TrimLow>
<wcs:TrimHigh>22</wcs:TrimHigh>
</wcs:DimensionTrim>
<wcs:DimensionTrim>
<wcs:Dimension>Lat</wcs:Dimension>
<wcs:TrimLow>36</wcs:TrimLow>
<wcs:TrimHigh>38</wcs:TrimHigh>
</wcs:DimensionTrim>
<wcs:format>image/tiff</wcs:format>
<wcs:mediaType>multipart/related</wcs:mediaType>
</wcs:GetCoverage>"""
        return (body, "xml")
# WCS 2.0 GetCoverage GeoTIFF
class WCS20GetCoverageDatasetGeoTIFFPackBitsTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting a PackBits-compressed GeoTIFF."""

    # Compression tag the GeoTIFF mixin expects in the response file.
    expected_compression = "PACKBITS"

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=PackBits",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFHuffmanTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting Huffman (CCITT RLE) GeoTIFF compression."""

    # "Huffman" in the request maps to CCITTRLE in the produced file.
    expected_compression = "CCITTRLE"

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=Huffman",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFLZWTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting an LZW-compressed GeoTIFF."""

    expected_compression = "LZW"

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=LZW",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFJPEGLowTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting JPEG-in-TIFF at low quality (50)."""

    expected_compression = "JPEG"
    expected_jpeg_quality = 50

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=JPEG&geotiff:jpeg_quality=50",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFJPEGHighTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting JPEG-in-TIFF at high quality (90)."""

    expected_compression = "JPEG"
    expected_jpeg_quality = 90

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=JPEG&geotiff:jpeg_quality=90",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFDeflateTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting Deflate compression with a horizontal predictor."""

    expected_compression = "DEFLATE"

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:compression=Deflate&geotiff:predictor=Horizontal",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFInterleaveBandTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting band-interleaved GeoTIFF output."""

    expected_interleave = "BAND"

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:interleave=Band",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFTiled16TestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """KVP GetCoverage requesting 16x16-tiled GeoTIFF output."""

    expected_tiling = (16, 16)

    def getRequest(self):
        """Return the KVP query string and its encoding tag ("kvp")."""
        return (
            "service=wcs&version=2.0.0&request=GetCoverage&CoverageId=MER_FRS_1PNPDE20060822_092058_000001972050_00308_23408_0077_uint16_reduced_compressed&format=image/tiff&geotiff:tiling=true&geotiff:tilewidth=16&geotiff:tileheight=16",
            "kvp",
        )
class WCS20GetCoverageDatasetGeoTIFFPostTestCase(wcsbase.GeoTIFFMixIn, testbase.RectifiedGridCoverageTestCase):
    """POST GetCoverage combining Deflate compression, band interleave and 32x64 tiling."""

    expected_tiling = (32, 64)
    expected_interleave = "BAND"
    expected_compression = "DEFLATE"

    def getRequest(self):
        """Return the XML request body and its encoding tag ("xml")."""
        body = """<wcs:GetCoverage service="WCS" version="2.0.1"
xmlns:wcs="http://www.opengis.net/wcs/2.0"
xmlns:geotiff="http://www.opengis.net/gmlcov/geotiff/1.0">
<wcs:CoverageId>mosaic_MER_FRS_1PNPDE20060816_090929_000001972050_00222_23322_0058_RGB_reduced</wcs:CoverageId>
<wcs:format>image/tiff</wcs:format>
<wcs:Extension>
<geotiff:parameters>
<geotiff:compression>Deflate</geotiff:compression>
<geotiff:predictor>FloatingPoint</geotiff:predictor>
<geotiff:interleave>Band</geotiff:interleave>
<geotiff:tiling>true</geotiff:tiling>
<geotiff:tilewidth>32</geotiff:tilewidth>
<geotiff:tileheight>64</geotiff:tileheight>
</geotiff:parameters>
</wcs:Extension>
</wcs:GetCoverage>"""
        return (body, "xml")
| 55.9
| 500
| 0.72898
| 7,998
| 78,260
| 6.968742
| 0.077644
| 0.007571
| 0.043616
| 0.058598
| 0.781847
| 0.772015
| 0.755741
| 0.74503
| 0.734875
| 0.716072
| 0
| 0.107641
| 0.128916
| 78,260
| 1,399
| 501
| 55.939957
| 0.70995
| 0.206645
| 0
| 0.61615
| 0
| 0.105089
| 0.520883
| 0.333496
| 0
| 0
| 0
| 0.000715
| 0
| 1
| 0.212389
| false
| 0
| 0.003319
| 0.06969
| 0.589602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6a61d6e68b839104d63a170afe035bdaf41af83a
| 94,213
|
py
|
Python
|
models/attention_module.py
|
Jumperkables/trying_blp
|
4987e4d00b4ed8caa38b3e606b98feff1f88cd5d
|
[
"MIT"
] | null | null | null |
models/attention_module.py
|
Jumperkables/trying_blp
|
4987e4d00b4ed8caa38b3e606b98feff1f88cd5d
|
[
"MIT"
] | null | null | null |
models/attention_module.py
|
Jumperkables/trying_blp
|
4987e4d00b4ed8caa38b3e606b98feff1f88cd5d
|
[
"MIT"
] | 1
|
2021-12-16T10:24:19.000Z
|
2021-12-16T10:24:19.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from memory_rand import *
from memory_module import *
################################################################################################
import os
import sys
sys.path.insert(0, os.path.expanduser("~/kable_management/pooling_pkgs/block_py2") )
import fusions
################################################################################################
class SpatialAttentionModule(nn.Module):
    """Additive (Bahdanau-style) attention over the spatial grid of one frame.

    Scores every spatial region of a (1, H, W, input_size) feature map
    against a text encoding and returns the attention-weighted sum of the
    region features, followed by dropout.
    """

    def __init__(self, input_size=3072, feat_dim=7, hidden_size=512, dropout=0.2):
        """Set the hyper-parameters and build the layers."""
        super(SpatialAttentionModule, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.feat_dim = feat_dim
        # Alignment model, appendix A.1.2 of "Neural Machine Translation by
        # Jointly Learning to Align and Translate" (Bahdanau et al.).
        self.Wa = nn.Parameter(torch.FloatTensor(input_size, hidden_size), requires_grad=True)
        self.Ua = nn.Parameter(torch.FloatTensor(hidden_size * 2, hidden_size), requires_grad=True)
        self.Va = nn.Parameter(torch.FloatTensor(hidden_size), requires_grad=True)
        self.ba = nn.Parameter(torch.FloatTensor(1, 1, hidden_size), requires_grad=True)
        self.drop_keep_prob_image_embed_t = nn.Dropout(dropout)
        self.init_weights()

    def init_weights(self):
        """N(0, 0.01) init for the projections; zero bias."""
        for weight in (self.Wa, self.Ua, self.Va):
            weight.data.normal_(0.0, 0.01)
        self.ba.data.fill_(0)

    def forward(self, hidden_frames, hidden_text):
        """Return the (1, input_size) attended region summary.

        hidden_frames: (1, H, W, input_size) spatial features (e.g. C3D +
        ResNet concatenated); hidden_text: (1, hidden_size * 2) query.
        """
        assert self.feat_dim == hidden_frames.size(2)
        batch = hidden_frames.size(0)
        n_regions = hidden_frames.size(1) * hidden_frames.size(2)
        regions = hidden_frames.view(batch, n_regions, hidden_frames.size(3))
        # Project the query once per call (precomputed Ua*h, per the paper).
        query = torch.matmul(hidden_text, self.Ua).unsqueeze(1)      # (1, 1, hidden)
        keys = torch.matmul(regions, self.Wa)                        # (1, R, hidden)
        scores = torch.matmul(torch.tanh(keys + query + self.ba), self.Va)
        weights = F.softmax(scores, dim=1).unsqueeze(2)              # (1, R, 1)
        pooled = torch.sum(weights * regions, dim=1)                 # (1, input_size)
        return self.drop_keep_prob_image_embed_t(pooled)
class TemporalAttentionModule(nn.Module):
    """Additive attention over the time axis of an encoded frame sequence."""

    def __init__(self, input_size, hidden_size=512):
        """Set the hyper-parameters and build the layers."""
        super(TemporalAttentionModule, self).__init__()
        self.input_size = input_size  # in most cases, 2 * hidden_size
        self.hidden_size = hidden_size
        # Alignment model, appendix A.1.2 of "Neural Machine Translation by
        # Jointly Learning to Align and Translate" (Bahdanau et al.).
        self.Wa = nn.Parameter(torch.FloatTensor(input_size, hidden_size), requires_grad=True)
        self.Ua = nn.Parameter(torch.FloatTensor(input_size, hidden_size), requires_grad=True)
        self.Va = nn.Parameter(torch.FloatTensor(hidden_size), requires_grad=True)
        self.ba = nn.Parameter(torch.FloatTensor(1, 1, hidden_size), requires_grad=True)
        self.init_weights()

    def init_weights(self):
        """N(0, 0.01) init for the projections; zero bias."""
        for weight in (self.Wa, self.Ua, self.Va):
            weight.data.normal_(0.0, 0.01)
        self.ba.data.fill_(0)

    def forward(self, hidden_frames, hidden_text, inv_attention=False):
        """Return the (1, input_size) attention-weighted sum over time.

        hidden_frames: (1, T, input_size) encoder outputs; hidden_text:
        (1, input_size) query. When inv_attention is set, scores are
        negated before the softmax so the least-matching steps dominate.
        """
        query = torch.matmul(hidden_text, self.Ua).unsqueeze(1)      # (1, 1, hidden)
        keys = torch.matmul(hidden_frames, self.Wa)                  # (1, T, hidden)
        scores = torch.matmul(torch.tanh(keys + query + self.ba), self.Va)
        if inv_attention:
            scores = -scores
        weights = F.softmax(scores, dim=1).unsqueeze(2)              # (1, T, 1)
        return torch.sum(weights * hidden_frames, dim=1)             # (1, input_size)
# attention-based multimodal
class MultiModalNaiveModule(nn.Module):
    """Fuse a query state h with attended video and text summaries.

    Runs one additive-attention head per modality (video frames, question
    words) using h as the query, then mixes h with the two attended vectors
    through learned linear maps and a tanh.
    """

    def __init__(self, hidden_size=512, simple=False):
        """Set the hyper-parameters and build the layers."""
        super(MultiModalNaiveModule, self).__init__()
        self.hidden_size = hidden_size
        # `simple` is stored but never read inside this class.
        self.simple=simple
        # alignment model
        # see appendices A.1.2 of neural machine translation
        # W*/U* project sequence and query; V*/b* are score vector and bias
        # (suffix v = video branch, t = text branch).
        self.Wav = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wat = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Uav = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Uat = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Vav = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.Vat = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.bav = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        self.bat = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        # Output mixing: h, attended video, attended text -> fused state.
        self.Whh = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wvh = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wth = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.bh = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        self.init_weights()

    def init_weights(self):
        """N(0, 0.01) init for weight matrices/vectors; zero biases."""
        self.Wav.data.normal_(0.0, 0.01)
        self.Wat.data.normal_(0.0, 0.01)
        self.Uav.data.normal_(0.0, 0.01)
        self.Uat.data.normal_(0.0, 0.01)
        self.Vav.data.normal_(0.0, 0.01)
        self.Vat.data.normal_(0.0, 0.01)
        self.bav.data.fill_(0)
        self.bat.data.fill_(0)
        self.Whh.data.normal_(0.0, 0.01)
        self.Wvh.data.normal_(0.0, 0.01)
        self.Wth.data.normal_(0.0, 0.01)
        self.bh.data.fill_(0)

    def forward(self, h, hidden_frames, hidden_text, inv_attention=False):
        """Attend over both modalities with query h and return the fused state."""
        # hidden_text: 1 x T1 x 1024 (looks like a two layer one-directional LSTM, combining each layer's hidden)
        # hidden_frame: 1 x T2 x 1024 (from video encoder output, 1024 is similar from above)
        # NOTE(review): `inv_attention` is accepted but never used in this method.
        #print hidden_frames.size(),hidden_text.size()
        # Query projections, one per modality (precomputed Ua*h).
        Uhv = torch.matmul(h, self.Uav) # (1,512)
        Uhv = Uhv.view(Uhv.size(0),1,Uhv.size(1)) # (1,1,512)
        Uht = torch.matmul(h, self.Uat) # (1,512)
        Uht = Uht.view(Uht.size(0),1,Uht.size(1)) # (1,1,512)
        #print Uhv.size(),Uht.size()
        Wsv = torch.matmul(hidden_frames, self.Wav) # (1,T,512)
        #print Wsv.size()
        # Additive attention scores per time step, per modality.
        att_vec_v = torch.matmul( torch.tanh(Wsv + Uhv + self.bav), self.Vav )
        Wst = torch.matmul(hidden_text, self.Wat) # (1,T,512)
        att_vec_t = torch.matmul( torch.tanh(Wst + Uht + self.bat), self.Vat )
        att_vec_v = torch.softmax(att_vec_v, dim=1)
        att_vec_t = torch.softmax(att_vec_t, dim=1)
        #print att_vec_v.size(),att_vec_t.size()
        att_vec_v = att_vec_v.view(att_vec_v.size(0),att_vec_v.size(1),1) # expand att_vec from 1xT to 1xTx1
        att_vec_t = att_vec_t.view(att_vec_t.size(0),att_vec_t.size(1),1) # expand att_vec from 1xT to 1xTx1
        # Attention-weighted sums over each sequence.
        hv_weighted = att_vec_v * hidden_frames
        hv_sum = torch.sum(hv_weighted, dim=1)
        ht_weighted = att_vec_t * hidden_text
        ht_sum = torch.sum(ht_weighted, dim=1)
        # Mix query and both attended summaries into the fused output.
        output = torch.tanh( torch.matmul(h,self.Whh) + torch.matmul(hv_sum,self.Wvh) +
        torch.matmul(ht_sum, self.Wth) + self.bh )
        # NOTE(review): indexing size(1)/size(2) requires a 3-D `output`;
        # the exact shape of `h` passed by the caller determines whether
        # this holds — confirm before relying on it.
        output = output.view(output.size(1),output.size(2))
        return output
class MultiModalAttentionModule(nn.Module):
    """Multimodal fusion with a learned soft gate between video and text.

    Like the naive variant, attends over both modalities with query h, but
    additionally computes a 2-way softmax gate (beta) — optionally through a
    bilinear-pooling module `blp` — that weighs the encoded video summary
    against the encoded text summary in the final mix.
    """

    def __init__(self, opt, blp, hidden_size=512, simple=False):
        """Set the hyper-parameters and build the layers."""
        # NOTE(review): `opt` is accepted but not read in this class.
        super(MultiModalAttentionModule, self).__init__()
        self.hidden_size = hidden_size
        # `simple` is stored but never read inside this class.
        self.simple=simple
        # alignment model
        # see appendices A.1.2 of neural machine translation
        # W*/U* project sequence and query; V*/b* are score vector and bias
        # (suffix v = video branch, t = text branch).
        self.Wav = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wat = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Uav = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Uat = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Vav = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.Vat = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.bav = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        self.bat = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        # Output mixing: h, attended video, attended text -> fused state.
        self.Whh = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wvh = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Wth = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.bh = nn.Parameter(torch.FloatTensor(1,1,hidden_size),requires_grad=True)
        # Encoders applied to the attended sums before gating.
        self.video_sum_encoder = nn.Linear(hidden_size, hidden_size)
        self.question_sum_encoder = nn.Linear(hidden_size, hidden_size)
        # Gate parameters producing the 2-way modality weight beta.
        self.Wb = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Vbv = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.Vbt = nn.Parameter(torch.FloatTensor(hidden_size, hidden_size),requires_grad=True)
        self.bbv = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.bbt = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        self.wb = nn.Parameter(torch.FloatTensor(hidden_size),requires_grad=True)
        # Optional bilinear-pooling module; when None, plain concatenation
        # is used to combine the two gate pre-activations.
        self.blp = blp
        self.init_weights()

    def init_weights(self):
        """N(0, 0.01) init for weights, zero biases; 0.02 for the Linear encoders."""
        self.Wav.data.normal_(0.0, 0.01)
        self.Wat.data.normal_(0.0, 0.01)
        self.Uav.data.normal_(0.0, 0.01)
        self.Uat.data.normal_(0.0, 0.01)
        self.Vav.data.normal_(0.0, 0.01)
        self.Vat.data.normal_(0.0, 0.01)
        self.bav.data.fill_(0)
        self.bat.data.fill_(0)
        self.Whh.data.normal_(0.0, 0.01)
        self.Wvh.data.normal_(0.0, 0.01)
        self.Wth.data.normal_(0.0, 0.01)
        self.bh.data.fill_(0)
        self.video_sum_encoder.weight.data.normal_(0.0, 0.02)
        self.video_sum_encoder.bias.data.fill_(0)
        self.question_sum_encoder.weight.data.normal_(0.0, 0.02)
        self.question_sum_encoder.bias.data.fill_(0)
        self.Wb.data.normal_(0.0, 0.01)
        self.Vbv.data.normal_(0.0, 0.01)
        self.Vbt.data.normal_(0.0, 0.01)
        self.wb.data.normal_(0.0, 0.01)
        self.bbv.data.fill_(0)
        self.bbt.data.fill_(0)

    def forward(self, h, hidden_frames, hidden_text):
        """Attend over both modalities, gate them with beta, return the fused state."""
        # hidden_text: 1 x T1 x 1024 (looks like a two layer one-directional LSTM, combining each layer's hidden)
        # hidden_frame: 1 x T2 x 1024 (from video encoder output, 1024 is similar from above)
        #print hidden_frames.size(),hidden_text.size()
        # Query projections, one per modality (precomputed Ua*h).
        Uhv = torch.matmul(h, self.Uav) # (1,512)
        Uhv = Uhv.view(Uhv.size(0),1,Uhv.size(1)) # (1,1,512)
        Uht = torch.matmul(h, self.Uat) # (1,512)
        Uht = Uht.view(Uht.size(0),1,Uht.size(1)) # (1,1,512)
        #print Uhv.size(),Uht.size()
        Wsv = torch.matmul(hidden_frames, self.Wav) # (1,T,512)
        #print Wsv.size()
        # Additive attention scores per time step, per modality.
        att_vec_v = torch.matmul( torch.tanh(Wsv + Uhv + self.bav), self.Vav )
        Wst = torch.matmul(hidden_text, self.Wat) # (1,T,512)
        att_vec_t = torch.matmul( torch.tanh(Wst + Uht + self.bat), self.Vat )
        att_vec_v = torch.softmax(att_vec_v, dim=1)
        att_vec_t = torch.softmax(att_vec_t, dim=1)
        #print att_vec_v.size(),att_vec_t.size()
        att_vec_v = att_vec_v.view(att_vec_v.size(0),att_vec_v.size(1),1) # expand att_vec from 1xT to 1xTx1
        att_vec_t = att_vec_t.view(att_vec_t.size(0),att_vec_t.size(1),1) # expand att_vec from 1xT to 1xTx1
        # Attention-weighted sums, plus encoded versions used after gating.
        hv_weighted = att_vec_v * hidden_frames
        hv_sum = torch.sum(hv_weighted, dim=1)
        hv_sum2 = self.video_sum_encoder(hv_sum)
        ht_weighted = att_vec_t * hidden_text
        ht_sum = torch.sum(ht_weighted, dim=1)
        ht_sum2 = self.question_sum_encoder(ht_sum)
        # Gate pre-activations for each modality (share the Wb*h term).
        Wbs = torch.matmul(h, self.Wb)
        mt1 = torch.matmul(ht_sum, self.Vbt) + self.bbt + Wbs
        mv1 = torch.matmul(hv_sum, self.Vbv) + self.bbv + Wbs
        # Bilinear pooling goes here
        if self.blp is not None:
            mtv = torch.tanh(self.blp([mv1[0],mt1[0]]).view(2,-1))
        else:
            mtv = torch.tanh(torch.cat([mv1,mt1],dim=0))
        mtv2 = torch.matmul(mtv, self.wb)
        # beta[0] weighs the video summary, beta[1] the text summary.
        beta = torch.softmax(mtv2,dim=0)
        #print beta.size(),beta
        output = torch.tanh( torch.matmul(h,self.Whh) + beta[0] * hv_sum2 +
        beta[1] * ht_sum2 + self.bh )
        # NOTE(review): indexing size(1)/size(2) requires a 3-D `output`;
        # the exact shape of `h` passed by the caller determines whether
        # this holds — confirm before relying on it.
        output = output.view(output.size(1),output.size(2))
        return output
class LSTMEncDec(nn.Module):
    """Baseline LSTM encoder-decoder for multiple-choice video QA.

    Encodes each candidate question with a two-layer LSTM, encodes the
    video frames (VGG + C3D features) with a second two-layer LSTM seeded
    from scratch, re-reads the question starting from the video's final
    states, and scores the candidate from the final text hidden states.
    """

    def __init__(self, feat_channel, feat_dim, text_embed_size, hidden_size, vocab_size, num_layers, word_matrix,
                 answer_vocab_size=None, max_len=20, dropout=0.2):
        """Set the hyper-parameters and build the layers."""
        super(LSTMEncDec, self).__init__()
        # text input size
        self.text_embed_size = text_embed_size  # should be 300
        # video input size
        self.feat_channel = feat_channel
        self.feat_dim = feat_dim  # should be 7
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.drop_keep_prob_final_att_vec = nn.Dropout(dropout)
        self.embed = nn.Embedding(vocab_size, text_embed_size)
        self.lstm_text_1 = nn.LSTMCell(text_embed_size, hidden_size)
        self.lstm_text_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1 = nn.LSTMCell(feat_channel, hidden_size)
        self.lstm_video_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.video_encoder = nn.Linear(feat_channel, hidden_size * 2)
        self.linear_decoder_count_1 = nn.Linear(hidden_size * 2, hidden_size * 2)
        if answer_vocab_size is not None:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2, answer_vocab_size)
        else:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2, 1)  # Count is regression problem
        self.max_len = max_len
        self.init_weights(word_matrix)

    def init_weights(self, word_matrix):
        """Initialize the word embedding (from GloVe when word_matrix is given)."""
        if word_matrix is None:
            self.embed.weight.data.uniform_(-0.1, 0.1)
        else:
            # init embed from glove
            self.embed.weight.data.copy_(torch.from_numpy(word_matrix))

    def init_hiddens(self):
        """Return zeroed (h1, h2, c1, c2) LSTM states.

        NOTE(review): hard-codes .cuda(); fails on CPU-only machines.
        """
        s_t = torch.zeros(1, self.hidden_size).cuda()
        s_t2 = torch.zeros(1, self.hidden_size).cuda()
        c_t = torch.zeros(1, self.hidden_size).cuda()
        c_t2 = torch.zeros(1, self.hidden_size).cuda()
        return s_t, s_t2, c_t, c_t2

    def forward(self, data_dict):
        """Delegate to forward_action; returns (scores, predictions)."""
        ret = self.forward_action(data_dict)
        return ret

    def forward_action(self, data_dict):
        """Score every answer candidate for each question in the batch.

        Returns (outputs, predictions): candidate score matrix and the
        argmax index per question.
        """
        video_features = data_dict['video_features']
        questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
        num_mult_choices = data_dict['num_mult_choices']
        outputs = []
        predictions = []
        batch_size = len(questions)  # batch size has to be 1
        for j in range(batch_size):
            features_question_j = self.embed(questions[j])
            vgg, c3d = video_features[0][j], video_features[1][j]
            feature = torch.cat([vgg, c3d], dim=1)
            nImg = vgg.shape[0]
            outputs_j = []
            for n_cand in range(num_mult_choices):
                nQuestionWords = question_lengths[j]
                #############################
                # run text encoder first time
                #############################
                s1_t1, s1_t2, c1_t1, c1_t2 = self.init_hiddens()
                # BUG FIX: xrange is Python 2 only (NameError on Python 3).
                for i in range(nQuestionWords):
                    input_question = features_question_j[n_cand, i:i+1]
                    s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
                    s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
                ###########################################
                # run video encoder
                ###########################################
                sV_t1, sV_t2, cV_t1, cV_t2 = self.init_hiddens()
                # record each time t's hidden states for potential later use
                hidden_array_1 = []
                hidden_array_2 = []
                for i in range(nImg):
                    sV_t1, cV_t1 = self.lstm_video_1(feature[i:i + 1], (sV_t1, cV_t1))
                    sV_t2, cV_t2 = self.lstm_video_2(sV_t1, (sV_t2, cV_t2))
                    sV_t1_vec = sV_t1.view(sV_t1.size(0), 1, sV_t1.size(1))
                    sV_t2_vec = sV_t2.view(sV_t2.size(0), 1, sV_t2.size(1))
                    hidden_array_1.append(sV_t1_vec)
                    hidden_array_2.append(sV_t2_vec)
                sV_l1 = torch.cat(hidden_array_1, dim=1)
                sV_l2 = torch.cat(hidden_array_2, dim=1)
                sV_ll = torch.cat((sV_l1, sV_l2), dim=2)
                #############################
                # run text encoder second time
                #############################
                # seed the text LSTMs with the video encoder's final states
                input_question = features_question_j[n_cand, 0:1]
                sT_t1, cT_t1 = self.lstm_text_1(input_question, (sV_t1, cV_t1))
                sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sV_t2, cV_t2))
                for i in range(1, nQuestionWords):
                    input_question = features_question_j[n_cand, i:i+1]
                    sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
                    sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
                # concatenate the final hiddens of both text layers: (1, hidden*2)
                sT_t = torch.cat((sT_t1, sT_t2), dim=1)
                final_embed = self.linear_decoder_count_1(sT_t)
                output = self.linear_decoder_count_2(final_embed)
                outputs_j.append(output)
            # output is the score of each multiple choice
            outputs_j = torch.cat(outputs_j, 1)
            outputs.append(outputs_j)
            # for evaluating accuracy, take the highest-scoring candidate
            _, mx_idx = torch.max(outputs_j, 1)
            predictions.append(mx_idx)
        outputs = torch.cat(outputs, 0)
        predictions = torch.cat(predictions, 0)
        return outputs, predictions

    def accuracy(self, logits, targets):
        """Return the percentage of predictions equal to the targets."""
        correct = torch.sum(logits.eq(targets)).float()
        return correct * 100.0 / targets.size(0)
class TGIFBenchmark(nn.Module):
    """TGIF-QA benchmark model: LSTM encoders plus temporal attention.

    Same encode-watch-re-encode pipeline as LSTMEncDec, but the final
    score additionally attends over the concatenated video hidden states
    (TemporalAttentionModule) before decoding.
    """

    def __init__(self, feat_channel, feat_dim, text_embed_size, hidden_size, vocab_size, num_layers, word_matrix,
                 answer_vocab_size=None, max_len=20, dropout=0.2):
        """Set the hyper-parameters and build the layers."""
        super(TGIFBenchmark, self).__init__()
        # text input size
        self.text_embed_size = text_embed_size  # should be 300
        # video input size
        self.feat_channel = feat_channel
        self.feat_dim = feat_dim  # should be 7
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.TpAtt = TemporalAttentionModule(hidden_size * 2, hidden_size)
        self.drop_keep_prob_final_att_vec = nn.Dropout(dropout)
        self.embed = nn.Embedding(vocab_size, text_embed_size)
        self.lstm_text_1 = nn.LSTMCell(text_embed_size, hidden_size)
        self.lstm_text_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1 = nn.LSTMCell(feat_channel, hidden_size)
        self.lstm_video_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.video_encoder = nn.Linear(feat_channel, hidden_size * 2)
        self.linear_decoder_att = nn.Linear(hidden_size * 2, hidden_size * 2)
        self.linear_decoder_mem = nn.Linear(hidden_size * 2 + hidden_size, hidden_size * 2)
        self.linear_decoder_count_1 = nn.Linear(hidden_size * 2, hidden_size * 2)
        if answer_vocab_size is not None:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2, answer_vocab_size)
        else:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2, 1)  # Count is regression problem
        self.max_len = max_len
        self.init_weights(word_matrix)

    def init_weights(self, word_matrix):
        """Initialize the word embedding (from GloVe when word_matrix is given)."""
        if word_matrix is None:
            self.embed.weight.data.uniform_(-0.1, 0.1)
        else:
            # init embed from glove
            self.embed.weight.data.copy_(torch.from_numpy(word_matrix))

    def init_hiddens(self):
        """Return zeroed (h1, h2, c1, c2) LSTM states.

        NOTE(review): hard-codes .cuda(); fails on CPU-only machines.
        """
        s_t = torch.zeros(1, self.hidden_size).cuda()
        s_t2 = torch.zeros(1, self.hidden_size).cuda()
        c_t = torch.zeros(1, self.hidden_size).cuda()
        c_t2 = torch.zeros(1, self.hidden_size).cuda()
        return s_t, s_t2, c_t, c_t2

    def forward(self, data_dict):
        """Delegate to forward_action; returns (scores, predictions)."""
        ret = self.forward_action(data_dict)
        return ret

    def forward_action(self, data_dict):
        """Score every answer candidate for each question in the batch.

        Returns (outputs, predictions): candidate score matrix and the
        argmax index per question.
        """
        video_features = data_dict['video_features']
        questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
        num_mult_choices = data_dict['num_mult_choices']
        outputs = []
        predictions = []
        batch_size = len(questions)  # batch size has to be 1
        for j in range(batch_size):
            features_question_j = self.embed(questions[j])
            vgg, c3d = video_features[0][j], video_features[1][j]
            feature = torch.cat([vgg, c3d], dim=1)
            nImg = vgg.shape[0]
            outputs_j = []
            for n_cand in range(num_mult_choices):
                nQuestionWords = question_lengths[j]
                #############################
                # run text encoder first time
                #############################
                s1_t1, s1_t2, c1_t1, c1_t2 = self.init_hiddens()
                # BUG FIX: xrange is Python 2 only (NameError on Python 3).
                for i in range(nQuestionWords):
                    input_question = features_question_j[n_cand, i:i+1]
                    s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
                    s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
                # concatenated final hiddens of the first pass: (1, hidden*2)
                s1_t = torch.cat((s1_t1, s1_t2), dim=1)
                ###########################################
                # run video encoder
                ###########################################
                sV_t1, sV_t2, cV_t1, cV_t2 = self.init_hiddens()
                # record each time step's hiddens for the temporal attention
                hidden_array_1 = []
                hidden_array_2 = []
                for i in range(nImg):
                    sV_t1, cV_t1 = self.lstm_video_1(feature[i:i + 1], (sV_t1, cV_t1))
                    sV_t2, cV_t2 = self.lstm_video_2(sV_t1, (sV_t2, cV_t2))
                    sV_t1_vec = sV_t1.view(sV_t1.size(0), 1, sV_t1.size(1))
                    sV_t2_vec = sV_t2.view(sV_t2.size(0), 1, sV_t2.size(1))
                    hidden_array_1.append(sV_t1_vec)
                    hidden_array_2.append(sV_t2_vec)
                sV_l1 = torch.cat(hidden_array_1, dim=1)
                sV_l2 = torch.cat(hidden_array_2, dim=1)
                # (1, T, hidden*2) sequence fed to the temporal attention
                sV_ll = torch.cat((sV_l1, sV_l2), dim=2)
                #############################
                # run text encoder second time
                #############################
                # seed the text LSTMs with the video encoder's final states
                input_question = features_question_j[n_cand, 0:1]
                sT_t1, cT_t1 = self.lstm_text_1(input_question, (sV_t1, cV_t1))
                sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sV_t2, cV_t2))
                for i in range(1, nQuestionWords):
                    input_question = features_question_j[n_cand, i:i+1]
                    sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
                    sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
                # concatenated final hiddens of the second pass: (1, hidden*2)
                sT_t = torch.cat((sT_t1, sT_t2), dim=1)
                #####################
                # temporal attention
                #####################
                vid_att = self.TpAtt(sV_ll, sT_t)
                #########################
                # decode the final output
                #########################
                # section 4.4 (temporal attention): gate the text signal
                # with the attended video representation
                final_embed = torch.tanh(self.linear_decoder_count_1(vid_att)) * sT_t
                output = self.linear_decoder_count_2(final_embed)
                outputs_j.append(output)
            # output is the score of each multiple choice
            outputs_j = torch.cat(outputs_j, 1)
            outputs.append(outputs_j)
            # for evaluating accuracy, take the highest-scoring candidate
            _, mx_idx = torch.max(outputs_j, 1)
            predictions.append(mx_idx)
        outputs = torch.cat(outputs, 0)
        predictions = torch.cat(predictions, 0)
        return outputs, predictions

    def accuracy(self, logits, targets):
        """Return the percentage of predictions equal to the targets."""
        correct = torch.sum(logits.eq(targets)).float()
        return correct * 100.0 / targets.size(0)
class CoMemory(nn.Module):
    """Co-memory network for multiple-choice video QA.

    The question is encoded with a two-layer LSTM; appearance (c3d) and
    motion (vgg) streams are encoded by separate two-layer LSTMs seeded from
    the question states.  Two episodic memories (one per stream) are refined
    over three hops against the per-frame hidden states, and the fused
    memory is decoded to a score for each answer candidate.
    """

    def __init__(self, feat_channel, feat_dim, text_embed_size, hidden_size, vocab_size, num_layers, word_matrix,
                 answer_vocab_size=None, max_len=20, dropout=0.2, mm_version=1, useSpatial=False, useNaive=False,
                 mrmUseOriginFeat=False):
        """Set the hyper-parameters and build the layers."""
        super(CoMemory, self).__init__()
        # text input size
        self.text_embed_size = text_embed_size  # should be 300
        # video input size
        self.feat_channel = feat_channel
        self.feat_dim = feat_dim  # should be 7
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.useNaive = useNaive
        self.mrmUseOriginFeat = mrmUseOriginFeat
        self.useSpatial = useSpatial
        self.mm_version = mm_version
        self.TpAtt_a = TemporalAttentionModule(hidden_size * 2, hidden_size)
        self.TpAtt_m = TemporalAttentionModule(hidden_size * 2, hidden_size)
        if useSpatial:
            self.SpAtt = SpatialAttentionModule(feat_channel, feat_dim, hidden_size)
        else:
            self.video_encoder = nn.Linear(feat_channel, hidden_size)
        self.drop_keep_prob_final_att_vec = nn.Dropout(dropout)
        self.embed = nn.Embedding(vocab_size, text_embed_size)
        self.lstm_text_1 = nn.LSTMCell(text_embed_size, hidden_size)
        self.lstm_text_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1a = nn.LSTMCell(4096, hidden_size)
        self.lstm_video_2a = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1m = nn.LSTMCell(4096, hidden_size)
        self.lstm_video_2m = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_mm_1 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_mm_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.linear_decoder_mem = nn.Linear(hidden_size * 2, hidden_size)
        self.hidden_encoder_1 = nn.Linear(hidden_size * 2, hidden_size)
        self.hidden_encoder_2 = nn.Linear(hidden_size * 2, hidden_size)
        # memory decoders consume [memory, episodic output, question] -> hidden*2
        self.ma_decoder = nn.Linear(hidden_size * 2 * 3, hidden_size * 2)
        self.mb_decoder = nn.Linear(hidden_size * 2 * 3, hidden_size * 2)
        self.linear_decoder_count_1 = nn.Linear(hidden_size * 2 * 2, hidden_size * 2)
        self.linear_decoder_count_2 = nn.Linear(hidden_size * 2, 1)
        self.max_len = max_len
        self.epm1 = EpisodicMemory(hidden_size * 2)
        self.epm2 = EpisodicMemory(hidden_size * 2)
        self.init_weights(word_matrix)

    def init_weights(self, word_matrix):
        """Initialize weights; word embedding comes from GloVe when given."""
        if word_matrix is None:
            self.embed.weight.data.uniform_(-0.1, 0.1)
        else:
            # init embed from glove
            self.embed.weight.data.copy_(torch.from_numpy(word_matrix))

    def init_hiddens(self):
        """Return zeroed (h1, h2, c1, c2) single-step LSTM states on the GPU."""
        s_t = torch.zeros(1, self.hidden_size).cuda()
        s_t2 = torch.zeros(1, self.hidden_size).cuda()
        c_t = torch.zeros(1, self.hidden_size).cuda()
        c_t2 = torch.zeros(1, self.hidden_size).cuda()
        return s_t, s_t2, c_t, c_t2

    def forward(self, data_dict):
        return self.forward_action(data_dict)

    def forward_action(self, data_dict):
        """Score each candidate answer for every clip in the batch.

        Returns (outputs, predictions): per-candidate scores of shape
        (batch, num_mult_choices) and the argmax candidate per clip.
        """
        video_features = data_dict['video_features']
        questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
        num_mult_choices = data_dict['num_mult_choices']
        outputs = []
        predictions = []
        batch_size = len(questions)  # clips are processed one at a time
        for j in range(batch_size):
            features_question_j = self.embed(questions[j])
            vgg, c3d = video_features[0][j], video_features[1][j]
            nImg = vgg.shape[0]
            outputs_j = []
            for n_cand in range(num_mult_choices):
                nQuestionWords = question_lengths[j]
                #############################
                # run text encoder first time
                #############################
                s1_t1, s1_t2, c1_t1, c1_t2 = self.init_hiddens()
                # range (not Python-2 xrange): the file targets Python 3
                for i in range(nQuestionWords):
                    input_question = features_question_j[n_cand, i:i + 1]  # (1, 300)
                    s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
                    s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
                # s1_t1, s1_t2 are the last hiddens
                s1_t = torch.cat((s1_t1, s1_t2), dim=1)  # (1, hidden*2)
                ###########################################
                # run the two-stream video encoders, seeded
                # from the question states
                ###########################################
                sV_t1a, sV_t2a, cV_t1a, cV_t2a = s1_t1, s1_t2, c1_t1, c1_t2
                sV_t1m, sV_t2m, cV_t1m, cV_t2m = s1_t1, s1_t2, c1_t1, c1_t2
                # record the hidden state at each step for the episodic memories
                hidden_array_1a = []
                hidden_array_2a = []
                hidden_array_1m = []
                hidden_array_2m = []
                for i in range(nImg):
                    feat_att_m = vgg[i:i + 1]   # (1, 4096)
                    feat_att_a = c3d[i:i + 1]   # (1, 4096)
                    sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
                    sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
                    sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
                    sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
                    hidden_array_1a.append(sV_t1a.view(sV_t1a.size(0), 1, sV_t1a.size(1)))
                    hidden_array_2a.append(sV_t2a.view(sV_t2a.size(0), 1, sV_t2a.size(1)))
                    hidden_array_1m.append(sV_t1m.view(sV_t1m.size(0), 1, sV_t1m.size(1)))
                    hidden_array_2m.append(sV_t2m.view(sV_t2m.size(0), 1, sV_t2m.size(1)))
                sV_l1a = torch.cat(hidden_array_1a, dim=1)
                sV_l2a = torch.cat(hidden_array_2a, dim=1)
                sV_l1m = torch.cat(hidden_array_1m, dim=1)
                sV_l2m = torch.cat(hidden_array_2m, dim=1)
                sV_lla = torch.cat((sV_l1a, sV_l2a), dim=2)  # (1, nImg, hidden*2)
                sV_llm = torch.cat((sV_l1m, sV_l2m), dim=2)
                #############################
                # co-memory refinement
                #############################
                M1 = torch.cat([sV_t1a, sV_t2a], dim=1)
                M2 = torch.cat([sV_t1m, sV_t2m], dim=1)
                # detach so the running memories do not back-prop into the encoders
                ma = M1.detach()
                mb = M2.detach()
                M1 = M1.view(M1.size(0), 1, M1.size(1))
                M2 = M2.view(M2.size(0), 1, M2.size(1))
                for hop in range(3):
                    mm = ma + mb
                    M1 = self.epm1(sV_lla, mm, M1)
                    M2 = self.epm2(sV_llm, mm, M2)
                    # NOTE(review): epm* presumably returns a (B, 1, D) memory
                    # that is flattened here for the decoders — confirm shape
                    M1 = M1.view(M1.size(0), M1.size(2))
                    M2 = M2.view(M2.size(0), M2.size(2))
                    maq = torch.cat([ma, M1, s1_t], dim=1)
                    mbq = torch.cat([mb, M2, s1_t], dim=1)
                    ma = torch.tanh(self.ma_decoder(maq))
                    mb = torch.tanh(self.mb_decoder(mbq))
                M = torch.cat((ma, mb), dim=1)
                #########################
                # decode the final output
                #########################
                out = torch.tanh(self.linear_decoder_count_1(M))
                output = self.linear_decoder_count_2(out)
                outputs_j.append(output)
            # outputs_j holds the score of each multiple choice
            outputs_j = torch.cat(outputs_j, 1)  # (1, num_mult_choices)
            outputs.append(outputs_j)
            # for accuracy evaluation, keep the argmax candidate
            _, mx_idx = torch.max(outputs_j, 1)
            predictions.append(mx_idx)
        outputs = torch.cat(outputs, 0)
        predictions = torch.cat(predictions, 0)
        return outputs, predictions

    def accuracy(self, logits, targets):
        """Return the percentage of predicted indices matching `targets`."""
        correct = torch.sum(logits.eq(targets)).float()
        return correct * 100.0 / targets.size(0)
class AttentionTwoStream(nn.Module):
    """Two-stream attention model for multiple-choice video QA.

    Appearance (c3d) and motion (vgg) streams are encoded by separate
    two-layer LSTMs seeded from a first question pass; a second question
    pass conditions temporal attention over both streams, and RAM memory
    modules fuse video and text before the final per-candidate score.
    """

    def __init__(self, opt, feat_channel, feat_dim, text_embed_size, hidden_size, vocab_size, num_layers, word_matrix,
                 answer_vocab_size=None, max_len=20, dropout=0.2, mm_version=1, useSpatial=False, useNaive=False,
                 mrmUseOriginFeat=False):
        """Set the hyper-parameters and build the layers."""
        super(AttentionTwoStream, self).__init__()
        self.opt = opt
        # text input size
        self.text_embed_size = text_embed_size  # should be 300
        # video input size
        self.feat_channel = feat_channel
        self.feat_dim = feat_dim  # should be 7
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.useNaive = useNaive
        self.mrmUseOriginFeat = mrmUseOriginFeat
        self.useSpatial = useSpatial
        self.mm_version = mm_version
        self.TpAtt_a = TemporalAttentionModule(hidden_size * 2, hidden_size)
        self.TpAtt_m = TemporalAttentionModule(hidden_size * 2, hidden_size)
        self.video_encoder = nn.Linear(feat_channel, hidden_size)
        self.drop_keep_prob_final_att_vec = nn.Dropout(dropout)
        self.embed = nn.Embedding(vocab_size, text_embed_size)
        self.lstm_text_1 = nn.LSTMCell(text_embed_size, hidden_size)
        self.lstm_text_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1a = nn.LSTMCell(4096, hidden_size)
        self.lstm_video_2a = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_video_1m = nn.LSTMCell(4096, hidden_size)
        self.lstm_video_2m = nn.LSTMCell(hidden_size, hidden_size)
        self.dropout = nn.Dropout(0.2)
        if mm_version == 1:
            self.lstm_mm_1 = nn.LSTMCell(hidden_size, hidden_size)
            self.lstm_mm_2 = nn.LSTMCell(hidden_size, hidden_size)
            self.linear_decoder_mem = nn.Linear(hidden_size * 2, hidden_size)
            self.hidden_encoder_1 = nn.Linear(hidden_size * 2, hidden_size)
            self.hidden_encoder_2 = nn.Linear(hidden_size * 2, hidden_size)
        else:
            self.gru_mm = nn.GRUCell(hidden_size, hidden_size)
            self.linear_decoder_mem = nn.Linear(hidden_size, hidden_size)
        self.linear_decoder_att_a = nn.Linear(hidden_size * 2, hidden_size)
        self.linear_decoder_att_m = nn.Linear(hidden_size * 2, hidden_size)
        if answer_vocab_size is not None:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2 + hidden_size, answer_vocab_size)
        else:
            self.linear_decoder_count_2 = nn.Linear(hidden_size * 2 + hidden_size, 1)  # Count is regression problem
        self.max_len = max_len
        if mrmUseOriginFeat:
            self.mrm_vid = MemoryRamTwoStreamModule(hidden_size, hidden_size, max_len)
            self.mrm_txt = MemoryRamModule(text_embed_size, hidden_size, max_len)
        else:
            self.mrm_vid = MemoryRamTwoStreamModule(hidden_size, hidden_size, max_len)
            self.mrm_txt = MemoryRamModule(hidden_size, hidden_size, max_len)
        self.init_weights(word_matrix)
        # bilinear-pooling fusion for the multi-modal attention module
        # (string equality, not `is`: identity comparison with a literal is
        # implementation-dependent and raises SyntaxWarning on Python 3.8+)
        if self.opt.pool_type != "default":
            activation = 'leaky_relu'
            dropout = 0.2
            choices = {
                "default": None,
                "LinearSum": fusions.LinearSum(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,  # 1200
                    activ_input=activation, activ_output=activation,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "ConcatMLP": fusions.ConcatMLP(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim,
                    dimensions=[opt.pool_hidden_dim, opt.pool_hidden_dim],  # [500, 500]
                    activation=activation,
                    dropout=dropout
                ),
                "MCB": fusions.MCB(
                    # 16000; not usable with pytorch 1.0 or later apparently
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                    activ_output=activation,
                    dropout_output=dropout
                ),
                "MFH": fusions.MFH(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, factor=2, mm_dim=opt.pool_hidden_dim,  # 1200
                    activ_input=activation, activ_output=activation,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "MFB": fusions.MFB(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, factor=2, mm_dim=opt.pool_hidden_dim,  # 1200
                    activ_input=activation, activ_output=activation, normalize=True,
                    dropout_input=dropout, dropout_pre_norm=dropout, dropout_output=dropout
                ),
                "MLB": fusions.MLB(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,  # 1200
                    activ_input=activation, activ_output=activation, normalize=True,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "Block": fusions.Block(
                    # 1600; no activations in here currently
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "Tucker": fusions.Tucker(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                    normalize=True,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "BlockTucker": fusions.BlockTucker(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                ),
                "Mutan": fusions.Mutan(
                    input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                    normalize=True,
                    dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
                )
            }
            blp = choices[self.opt.pool_type]
            if useNaive:
                self.mm_att = MultiModalNaiveModule(hidden_size)
            else:
                self.mm_att = MultiModalAttentionModule(opt, blp, hidden_size)

    def init_weights(self, word_matrix):
        """Initialize embedding (from GloVe when given) and RAM modules."""
        if word_matrix is None:
            self.embed.weight.data.uniform_(-0.1, 0.1)
        else:
            # init embed from glove
            self.embed.weight.data.copy_(torch.from_numpy(word_matrix))
        self.mrm_vid.init_weights()
        self.mrm_txt.init_weights()

    def init_hiddens(self):
        """Return zeroed (h1, h2, c1, c2) single-step LSTM states on the GPU."""
        s_t = torch.zeros(1, self.hidden_size).cuda()
        s_t2 = torch.zeros(1, self.hidden_size).cuda()
        c_t = torch.zeros(1, self.hidden_size).cuda()
        c_t2 = torch.zeros(1, self.hidden_size).cuda()
        return s_t, s_t2, c_t, c_t2

    def mm_module_v1(self, svt_tmp, memory_ram_vid, memory_ram_txt, loop=3):
        """Fuse RAM memories via a two-layer memory LSTM; returns (1, hidden*2)."""
        sm_q1, sm_q2, cm_q1, cm_q2 = self.init_hiddens()
        mm_oo = self.drop_keep_prob_final_att_vec(torch.tanh(self.hidden_encoder_1(svt_tmp)))
        for _ in range(loop):
            sm_q1, cm_q1 = self.lstm_mm_1(mm_oo, (sm_q1, cm_q1))
            sm_q2, cm_q2 = self.lstm_mm_2(sm_q1, (sm_q2, cm_q2))
            mm_o1 = self.mm_att(sm_q2, memory_ram_vid, memory_ram_txt)
            mm_o2 = torch.cat((sm_q2, mm_o1), dim=1)
            mm_oo = self.drop_keep_prob_final_att_vec(torch.tanh(self.hidden_encoder_2(mm_o2)))
        smq = torch.cat((sm_q1, sm_q2), dim=1)
        return smq

    def mm_module_v2(self, memory_ram_vid, memory_ram_txt, loop=5):
        """Fuse RAM memories via a GRU cell; returns (1, hidden)."""
        h_t = torch.zeros(1, self.hidden_size).cuda()
        for _ in range(loop):
            mm_o = self.mm_att(h_t, memory_ram_vid, memory_ram_txt)
            h_t = self.gru_mm(mm_o, h_t)
        return h_t

    def forward(self, data_dict):
        return self.forward_action(data_dict)

    def forward_action(self, data_dict):
        """Score each candidate answer for every clip in the batch.

        Returns (outputs, predictions): per-candidate scores of shape
        (batch, num_mult_choices) and the argmax candidate per clip.
        """
        video_features = data_dict['video_features']
        questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
        num_mult_choices = data_dict['num_mult_choices']
        outputs = []
        predictions = []
        batch_size = len(questions)  # clips are processed one at a time
        for j in range(batch_size):
            features_question_j = self.embed(questions[j])
            vgg, c3d = video_features[0][j], video_features[1][j]
            nImg = vgg.shape[0]
            outputs_j = []
            for n_cand in range(num_mult_choices):
                nQuestionWords = question_lengths[j]
                #############################
                # run text encoder first time
                #############################
                s1_t1, s1_t2, c1_t1, c1_t2 = self.init_hiddens()
                # range (not Python-2 xrange): the file targets Python 3
                for i in range(nQuestionWords):
                    input_question = features_question_j[n_cand, i:i + 1]  # (1, 300)
                    s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
                    s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
                # s1_t1, s1_t2 are the last hiddens
                s1_t = torch.cat((s1_t1, s1_t2), dim=1)  # (1, hidden*2)
                ###########################################
                # run the two-stream video encoders, seeded
                # from the question states
                ###########################################
                sV_t1a, sV_t2a, cV_t1a, cV_t2a = s1_t1, s1_t2, c1_t1, c1_t2
                sV_t1m, sV_t2m, cV_t1m, cV_t2m = s1_t1, s1_t2, c1_t1, c1_t2
                # record the hidden state at each step for temporal attention
                hidden_array_1a = []
                hidden_array_2a = []
                hidden_array_1m = []
                hidden_array_2m = []
                for i in range(nImg):
                    feat_att_m = vgg[i:i + 1]   # (1, 4096)
                    feat_att_a = c3d[i:i + 1]   # (1, 4096)
                    sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
                    sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
                    sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
                    sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
                    hidden_array_1a.append(sV_t1a.view(sV_t1a.size(0), 1, sV_t1a.size(1)))
                    hidden_array_2a.append(sV_t2a.view(sV_t2a.size(0), 1, sV_t2a.size(1)))
                    hidden_array_1m.append(sV_t1m.view(sV_t1m.size(0), 1, sV_t1m.size(1)))
                    hidden_array_2m.append(sV_t2m.view(sV_t2m.size(0), 1, sV_t2m.size(1)))
                sV_l1a = torch.cat(hidden_array_1a, dim=1)
                sV_l2a = torch.cat(hidden_array_2a, dim=1)
                sV_l1m = torch.cat(hidden_array_1m, dim=1)
                sV_l2m = torch.cat(hidden_array_2m, dim=1)
                sV_lla = torch.cat((sV_l1a, sV_l2a), dim=2)  # (1, nImg, hidden*2)
                sV_llm = torch.cat((sV_l1m, sV_l2m), dim=2)
                ##############################
                # run text encoder second time,
                # seeded from the video states
                ##############################
                sT_t1, sT_t2, cT_t1, cT_t2 = self.init_hiddens()
                sT_t1, sT_t2 = sV_t1a + sV_t1m, sV_t2a + sV_t2m
                hidden_array_3 = []
                for i in range(nQuestionWords):
                    input_question = features_question_j[n_cand, i:i + 1]
                    sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
                    sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
                    hidden_array_3.append(sT_t2)
                # sT_t1, sT_t2 are the last hiddens
                sT_t = torch.cat((sT_t1, sT_t2), dim=1)  # (1, hidden*2)
                #####################
                # temporal attention
                #####################
                vid_att_a = self.TpAtt_a(sV_lla, sT_t)
                vid_att_m = self.TpAtt_m(sV_llm, sT_t)
                ################
                # ram memory
                ################
                sT_rl = torch.cat(hidden_array_3, dim=0)
                memory_ram_vid = self.mrm_vid(sV_l2a[0, :, :], sV_l2m[0, :, :], nImg)
                memory_ram_txt = self.mrm_txt(sT_rl, nQuestionWords)
                if self.mm_version == 1:
                    svt_tmp = torch.cat((sV_t2a, sV_t2m), dim=1)
                    smq = self.mm_module_v1(svt_tmp, memory_ram_vid, memory_ram_txt)
                elif self.mm_version == 2:
                    smq = self.mm_module_v2(memory_ram_vid, memory_ram_txt)
                #########################
                # decode the final output
                #########################
                final_embed_a = torch.tanh(self.linear_decoder_att_a(vid_att_a))
                final_embed_m = torch.tanh(self.linear_decoder_att_m(vid_att_m))
                final_embed_2 = torch.tanh(self.linear_decoder_mem(smq))
                final_embed = torch.cat([final_embed_a, final_embed_m, final_embed_2], dim=1)
                output = self.linear_decoder_count_2(final_embed)
                outputs_j.append(output)
            # outputs_j holds the score of each multiple choice
            outputs_j = torch.cat(outputs_j, 1)  # (1, num_mult_choices)
            outputs.append(outputs_j)
            # for accuracy evaluation, keep the argmax candidate
            _, mx_idx = torch.max(outputs_j, 1)
            predictions.append(mx_idx)
        outputs = torch.cat(outputs, 0)
        predictions = torch.cat(predictions, 0)
        return outputs, predictions

    def accuracy(self, logits, targets):
        """Return the percentage of predicted indices matching `targets`."""
        correct = torch.sum(logits.eq(targets)).float()
        return correct * 100.0 / targets.size(0)
################################################################################################
################################################################################################
################################################################################################
################################################################################################
################################################################################################
class TGIFAttentionTwoStream(nn.Module):
def __init__(self, opt, task, feat_channel, feat_dim, text_embed_size, hidden_size, vocab_size, num_layers, word_matrix,
             answer_vocab_size=None, max_len=20, dropout=0.2, mm_version=1, useSpatial=False, useNaive=False, iter_num=3):
    """Set the hyper-parameters and build the layers."""
    super(TGIFAttentionTwoStream, self).__init__()
    self.task = task
    self.opt = opt
    # text input size
    self.text_embed_size = text_embed_size  # should be 300
    # video input size
    self.feat_channel = feat_channel
    self.feat_dim = feat_dim  # should be 7
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.useNaive = useNaive
    self.useSpatial = useSpatial
    self.mm_version = mm_version
    self.TpAtt_a = TemporalAttentionModule(hidden_size * 2, hidden_size)
    self.TpAtt_m = TemporalAttentionModule(hidden_size * 2, hidden_size)
    if useSpatial:
        self.SpAtt = SpatialAttentionModule(feat_channel, feat_dim, hidden_size)
    else:
        self.video_encoder = nn.Linear(feat_channel, hidden_size)
    self.drop_keep_prob_final_att_vec = nn.Dropout(dropout)
    self.embed = nn.Embedding(vocab_size, text_embed_size)
    self.lstm_text_1 = nn.LSTMCell(text_embed_size, hidden_size)
    self.lstm_text_2 = nn.LSTMCell(hidden_size, hidden_size)
    # appearance stream takes 2048-d features, motion stream 4096-d
    self.lstm_video_1a = nn.LSTMCell(2048, hidden_size)
    self.lstm_video_2a = nn.LSTMCell(hidden_size, hidden_size)
    self.lstm_video_1m = nn.LSTMCell(4096, hidden_size)
    self.lstm_video_2m = nn.LSTMCell(hidden_size, hidden_size)
    self.iter_num = iter_num
    if mm_version == 1:
        self.lstm_mm_1 = nn.LSTMCell(hidden_size, hidden_size)
        self.lstm_mm_2 = nn.LSTMCell(hidden_size, hidden_size)
        self.linear_decoder_mem = nn.Linear(hidden_size * 2, hidden_size)
        self.hidden_encoder_1 = nn.Linear(hidden_size * 2, hidden_size)
        self.hidden_encoder_2 = nn.Linear(hidden_size * 2, hidden_size)
    else:
        self.gru_mm = nn.GRUCell(hidden_size, hidden_size)
        self.linear_decoder_mem = nn.Linear(hidden_size, hidden_size)
    self.linear_decoder_att_a = nn.Linear(hidden_size * 2, hidden_size)
    self.linear_decoder_att_m = nn.Linear(hidden_size * 2, hidden_size)
    if answer_vocab_size is not None:
        self.linear_decoder_count_2 = nn.Linear(hidden_size * 2 + hidden_size, answer_vocab_size)
    else:
        self.linear_decoder_count_2 = nn.Linear(hidden_size * 2 + hidden_size, 1)  # Count is regression problem
    self.max_len = max_len
    self.mrm_vid = MemoryRamTwoStreamModule(hidden_size, hidden_size, max_len)
    self.mrm_txt = MemoryRamModule(hidden_size, hidden_size, max_len)
    self.init_weights(word_matrix)
    # bilinear-pooling fusion for the multi-modal attention module
    # (string equality, not `is`: identity comparison with a literal is
    # implementation-dependent and raises SyntaxWarning on Python 3.8+)
    if self.opt.pool_type != "default":
        activation = 'leaky_relu'
        dropout = 0.2
        choices = {
            "default": None,
            "LinearSum": fusions.LinearSum(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,  # 1200
                activ_input=activation, activ_output=activation,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "ConcatMLP": fusions.ConcatMLP(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim,
                dimensions=[opt.pool_hidden_dim, opt.pool_hidden_dim],  # [500, 500]
                activation=activation,
                dropout=dropout
            ),
            "MCB": fusions.MCB(
                # 16000; not usable with pytorch 1.0 or later apparently
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                activ_output=activation,
                dropout_output=dropout
            ),
            "MFH": fusions.MFH(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, factor=2, mm_dim=opt.pool_hidden_dim,  # 1200
                activ_input=activation, activ_output=activation,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "MFB": fusions.MFB(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, factor=2, mm_dim=opt.pool_hidden_dim,  # 1200
                activ_input=activation, activ_output=activation, normalize=True,
                dropout_input=dropout, dropout_pre_norm=dropout, dropout_output=dropout
            ),
            "MLB": fusions.MLB(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,  # 1200
                activ_input=activation, activ_output=activation, normalize=True,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "Block": fusions.Block(
                # 1600; no activations in here currently
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "Tucker": fusions.Tucker(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                normalize=True,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "BlockTucker": fusions.BlockTucker(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            ),
            "Mutan": fusions.Mutan(
                input_dims=opt.pool_in_dims, output_dim=opt.pool_out_dim, mm_dim=opt.pool_hidden_dim,
                normalize=True,
                dropout_input=dropout, dropout_pre_lin=dropout, dropout_output=dropout
            )
        }
        blp = choices[self.opt.pool_type]
        self.mm_att = MultiModalAttentionModule(opt, blp, hidden_size)
def init_weights(self, word_matrix):
    """Initialize the word embedding (from GloVe when provided) and the
    video/text RAM memory modules."""
    if word_matrix is not None:
        # copy pre-trained glove vectors into the embedding table
        self.embed.weight.data.copy_(torch.from_numpy(word_matrix))
    else:
        self.embed.weight.data.uniform_(-0.1, 0.1)
    self.mrm_vid.init_weights()
    self.mrm_txt.init_weights()
def init_hiddens(self):
    """Create zeroed initial LSTM states (h1, h2, c1, c2) on the GPU,
    each of shape (1, hidden_size)."""
    h1, h2, c1, c2 = (torch.zeros(1, self.hidden_size).cuda() for _ in range(4))
    return h1, h2, c1, c2
def mm_module_v1(self, svt_tmp, memory_ram_vid, memory_ram_txt, loop=3):
    """Iteratively fuse the video/text RAM memories with a two-layer
    memory LSTM, starting from an encoding of `svt_tmp`.

    Returns the concatenated final hidden states, shape (1, hidden*2).
    """
    h1, h2, c1, c2 = self.init_hiddens()
    fused = self.drop_keep_prob_final_att_vec(torch.tanh(self.hidden_encoder_1(svt_tmp)))
    for _ in range(loop):
        h1, c1 = self.lstm_mm_1(fused, (h1, c1))
        h2, c2 = self.lstm_mm_2(h1, (h2, c2))
        attended = self.mm_att(h2, memory_ram_vid, memory_ram_txt)
        combined = torch.cat((h2, attended), dim=1)
        fused = self.drop_keep_prob_final_att_vec(torch.tanh(self.hidden_encoder_2(combined)))
    return torch.cat((h1, h2), dim=1)
def mm_module_v2(self, memory_ram_vid, memory_ram_txt, loop=5):
    """Iteratively fuse the video/text RAM memories with a GRU cell.

    Returns the final hidden state, shape (1, hidden_size).
    """
    state = torch.zeros(1, self.hidden_size).cuda()
    for _ in range(loop):
        attended = self.mm_att(state, memory_ram_vid, memory_ram_txt)
        state = self.gru_mm(attended, state)
    return state
def forward(self, data_dict, question_type='Count'):
    """Dispatch to the per-task forward pass for a TGIF question type."""
    if question_type == 'Count':
        return self.forward_count(data_dict)
    if question_type == 'Action':
        return self.forward_action(data_dict)
    if question_type == 'Trans':
        return self.forward_trans(data_dict)
    # only four task types exist; anything else is a caller error
    assert question_type == 'FrameQA'
    return self.forward_frameqa(data_dict)
def forward_count(self, data_dict):
    """Forward pass for the TGIF 'Count' task (regression on repetitions).

    Per clip: first question pass -> two-stream video LSTMs (appearance /
    motion) seeded from the question states -> second question pass ->
    temporal attention + RAM memories -> scalar count, clamped to [1, 10].

    Returns (outputs, answers, predictions).
    """
    video_features, numImgs = data_dict['video_features'], data_dict['video_lengths']
    questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
    answers = data_dict['answers']
    outputs = []
    predictions = []
    batch_size = len(questions)  # clips are processed one at a time
    features_questions = self.embed(questions)
    for j in range(batch_size):
        nImg = numImgs[j]
        nQuestionWords = question_lengths[j]
        ################################
        # slice the input image features
        ################################
        feature = video_features[j, video_features.size(1) - nImg:]
        #############################
        # run text encoder first time
        #############################
        s1_t1, s1_t2, c1_t1, c1_t2 = self.init_hiddens()
        # range (not Python-2 xrange): the file targets Python 3
        for i in range(nQuestionWords):
            input_question = features_questions[j, i:i + 1]
            s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
            s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
        # s1_t1, s1_t2 are the last hiddens
        s1_t = torch.cat((s1_t1, s1_t2), dim=1)  # (1, hidden*2)
        ###########################################
        # run video encoder with spatial attention
        ###########################################
        sV_t1a, sV_t2a, cV_t1a, cV_t2a = s1_t1, s1_t2, c1_t1, c1_t2
        sV_t1m, sV_t2m, cV_t1m, cV_t2m = s1_t1, s1_t2, c1_t1, c1_t2
        # record the hidden state at each step for temporal attention
        hidden_array_1a = []
        hidden_array_2a = []
        hidden_array_1m = []
        hidden_array_2m = []
        for i in range(nImg):
            if self.useSpatial:
                # NOTE(review): this branch leaves feat_att_m/feat_att_a
                # unset and would crash below — looks dead; confirm before use
                input_frame = feature[i:i + 1]
                feat_att = self.SpAtt(input_frame, s1_t)
            else:
                # first 4096 channels feed the motion stream, rest the appearance stream
                feat_att_m = feature[i:i + 1, 0, 0, :4096]
                feat_att_a = feature[i:i + 1, 0, 0, 4096:]
            sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
            sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
            sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
            sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
            hidden_array_1a.append(sV_t1a.view(sV_t1a.size(0), 1, sV_t1a.size(1)))
            hidden_array_2a.append(sV_t2a.view(sV_t2a.size(0), 1, sV_t2a.size(1)))
            hidden_array_1m.append(sV_t1m.view(sV_t1m.size(0), 1, sV_t1m.size(1)))
            hidden_array_2m.append(sV_t2m.view(sV_t2m.size(0), 1, sV_t2m.size(1)))
        sV_l1a = torch.cat(hidden_array_1a, dim=1)
        sV_l2a = torch.cat(hidden_array_2a, dim=1)
        sV_l1m = torch.cat(hidden_array_1m, dim=1)
        sV_l2m = torch.cat(hidden_array_2m, dim=1)
        sV_lla = torch.cat((sV_l1a, sV_l2a), dim=2)  # (1, nImg, hidden*2)
        sV_llm = torch.cat((sV_l1m, sV_l2m), dim=2)
        ##############################
        # run text encoder second time,
        # seeded from the video states
        ##############################
        sT_t1, sT_t2, cT_t1, cT_t2 = self.init_hiddens()
        sT_t1, sT_t2 = sV_t1a + sV_t1m, sV_t2a + sV_t2m
        hidden_array_3 = []
        for i in range(nQuestionWords):
            input_question = features_questions[j, i:i + 1]
            sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
            sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
            hidden_array_3.append(sT_t2)
        # sT_t1, sT_t2 are the last hiddens
        sT_t = torch.cat((sT_t1, sT_t2), dim=1)  # (1, hidden*2)
        #####################
        # temporal attention
        #####################
        vid_att_a = self.TpAtt_a(sV_lla, sT_t)
        vid_att_m = self.TpAtt_m(sV_llm, sT_t)
        ################
        # ram memory
        ################
        sT_rl = torch.cat(hidden_array_3, dim=0)
        memory_ram_vid = self.mrm_vid(sV_l2a[0, :, :], sV_l2m[0, :, :], nImg)
        memory_ram_txt = self.mrm_txt(sT_rl, nQuestionWords)
        if self.mm_version == 1:
            svt_tmp = torch.cat((sV_t2a, sV_t2m), dim=1)
            smq = self.mm_module_v1(svt_tmp, memory_ram_vid, memory_ram_txt, self.iter_num)
        elif self.mm_version == 2:
            smq = self.mm_module_v2(memory_ram_vid, memory_ram_txt)
        #########################
        # decode the final output
        #########################
        final_embed_a = torch.tanh(self.linear_decoder_att_a(vid_att_a))
        final_embed_m = torch.tanh(self.linear_decoder_att_m(vid_att_m))
        final_embed_2 = torch.tanh(self.linear_decoder_mem(smq))
        final_embed = torch.cat([final_embed_a, final_embed_m, final_embed_2], dim=1)
        output = self.linear_decoder_count_2(final_embed)
        # counts are integers in [1, 10]; round the regression output
        prediction = torch.clamp(torch.round(output.detach()), min=1, max=10).int()
        outputs.append(output)
        predictions.append(prediction)
    outputs = torch.cat(outputs, 0)
    predictions = torch.cat(predictions, 0)
    return outputs, answers, predictions
def forward_action(self, data_dict):
video_features, numImgs = data_dict['video_features'], data_dict['video_lengths'],
questions, question_lengths = data_dict['candidates'], data_dict['candidate_lengths']
answers, num_mult_choices = data_dict['answers'], data_dict['num_mult_choices']
outputs = []
predictions = []
#print(questions.size()) # (N, 5, 35), 5 multiple choices, each choice is a question of 35 max lengths
batch_size = len(questions)
cnt = 0
########
#Flatten the batch and 5 candidates, process them with bert and FC layer and then recover them
features_questions = self.embed(questions)
# questions = questions.view(-1, questions.shape[-1])
# features_questions = self.bert(questions)[0][11] # Final bert layer
# features_questions = self.bert_fc(features_questions)
# features_questions = features_questions.view(batch_size, 5, questions.shape[-1], 300)
########
for j in range(batch_size):
nImg = numImgs[j]
outputs_j = []
predictions_j = []
for n_cand in range(num_mult_choices):
#print ' ', n_cand,
nQuestionWords = question_lengths[j][n_cand]
#nAnwserWords = candidate_lengths[j][n_cand]
################################
# slice the input image features
################################
feature = video_features[j,video_features.size(1)-nImg:]
#print('current video feature size', feature.size())
#############################
# run text encoder first time
#############################
s1_t1,s1_t2,c1_t1,c1_t2 = self.init_hiddens()
for i in xrange(nQuestionWords):
input_question = features_questions[j,n_cand,i:i+1]
s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
# here s1_t1, s1_t2 is the last hidden
s1_t = torch.cat( (s1_t1,s1_t2), dim=1) # should be of size (1,1024)
###########################################
# run video encoder with spatial attention
###########################################
sV_t1a,sV_t2a,cV_t1a,cV_t2a = s1_t1,s1_t2,c1_t1,c1_t2
sV_t1m,sV_t2m,cV_t1m,cV_t2m = s1_t1,s1_t2,c1_t1,c1_t2
# record each time t, hidden states, for later temporal attention after text encoding
hidden_array_1a = []
hidden_array_2a = []
hidden_array_1m = []
hidden_array_2m = []
for i in xrange(nImg):
if self.useSpatial:
input_frame = feature[i:i+1]
feat_att = self.SpAtt(input_frame, s1_t)
else:
feat_att_m = feature[i:i+1,0,0,:4096]
feat_att_a = feature[i:i+1,0,0,4096:]
sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
sV_t1a_vec = sV_t1a.view(sV_t1a.size(0),1,sV_t1a.size(1))
sV_t2a_vec = sV_t2a.view(sV_t2a.size(0),1,sV_t2a.size(1))
hidden_array_1a.append(sV_t1a_vec)
hidden_array_2a.append(sV_t2a_vec)
sV_t1m_vec = sV_t1m.view(sV_t1m.size(0),1,sV_t1m.size(1))
sV_t2m_vec = sV_t2m.view(sV_t2m.size(0),1,sV_t2m.size(1))
hidden_array_1m.append(sV_t1m_vec)
hidden_array_2m.append(sV_t2m_vec)
sV_l1a = torch.cat(hidden_array_1a, dim=1)
sV_l2a = torch.cat(hidden_array_2a, dim=1)
sV_l1m = torch.cat(hidden_array_1m, dim=1)
sV_l2m = torch.cat(hidden_array_2m, dim=1)
sV_lla = torch.cat((sV_l1a,sV_l2a), dim=2)
sV_llm = torch.cat((sV_l1m,sV_l2m), dim=2)
#############################
# run text encoder second time
#############################
sT_t1,sT_t2,cT_t1,cT_t2 = self.init_hiddens()
sT_t1,sT_t2 = sV_t1a+sV_t1m, sV_t2a+sV_t2m
hidden_array_3 = []
for i in xrange(nQuestionWords):
input_question = features_questions[j,n_cand,i:i+1]
sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
hidden_array_3.append(sT_t2)
#print('Text encoding One size', sT_t1.size(), sT_t2.size())
# here sT_t1, sT_t2 is the last hidden
sT_t = torch.cat( (sT_t1,sT_t2), dim=1) # should be of size (1,1024)
#####################
# temporal attention
#####################
vid_att_a = self.TpAtt_a(sV_lla, sT_t)
vid_att_m = self.TpAtt_m(sV_llm, sT_t)
################
# ram memory
################
sT_rl = torch.cat(hidden_array_3, dim=0)
memory_ram_vid = self.mrm_vid(sV_l2a[0,:,:], sV_l2m[0,:,:], nImg)
memory_ram_txt = self.mrm_txt(sT_rl, nQuestionWords)
if self.mm_version==1:
svt_tmp = torch.cat((sV_t2a,sV_t2m),dim=1)
smq = self.mm_module_v1(svt_tmp,memory_ram_vid,memory_ram_txt,self.iter_num)
elif self.mm_version==2:
smq = self.mm_module_v2(memory_ram_vid,memory_ram_txt)
#########################
# decode the final output
#########################
final_embed_a = torch.tanh( self.linear_decoder_att_a(vid_att_a) )
final_embed_m = torch.tanh( self.linear_decoder_att_m(vid_att_m) )
final_embed_2 = torch.tanh( self.linear_decoder_mem(smq) )
final_embed = torch.cat([final_embed_a,final_embed_m,final_embed_2],dim=1)
output = self.linear_decoder_count_2(final_embed)
outputs_j.append(output)
# output is the score of each multiple choice
outputs_j = torch.cat(outputs_j,1)
#print('Output_j size', outputs_j.size()) # (1,5)
outputs.append(outputs_j)
# for evaluate accuracy, find the max one
_,mx_idx = torch.max(outputs_j,1)
predictions.append(mx_idx)
#print(outputs_j,mx_idx)
outputs = torch.cat(outputs, 0)
predictions = torch.cat(predictions, 0)
return outputs, answers, predictions
def forward_trans(self, data_dict):
video_features, numImgs = data_dict['video_features'], data_dict['video_lengths'],
questions, question_lengths = data_dict['candidates'], data_dict['candidate_lengths']
answers, num_mult_choices = data_dict['answers'], data_dict['num_mult_choices']
outputs = []
predictions = []
#print(questions.size()) # (N, 5, 35), 5 multiple choices, each choice is a question of 35 max lengths
batch_size = len(questions)
cnt = 0
features_questions = self.embed(questions)
for j in range(batch_size):
nImg = numImgs[j]
outputs_j = []
predictions_j = []
for n_cand in range(num_mult_choices):
nQuestionWords = question_lengths[j][n_cand]
#nAnwserWords = answer_lengths[j]
################################
# slice the input image features
################################
feature = video_features[j,video_features.size(1)-nImg:]
#print('current video feature size', feature.size())
#############################
# run text encoder first time
#############################
s1_t1,s1_t2,c1_t1,c1_t2 = self.init_hiddens()
for i in xrange(nQuestionWords):
input_question = features_questions[j,n_cand,i:i+1]
s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
# here s1_t1, s1_t2 is the last hidden
s1_t = torch.cat( (s1_t1,s1_t2), dim=1) # should be of size (1,1024)
###########################################
# run video encoder with spatial attention
###########################################
sV_t1a,sV_t2a,cV_t1a,cV_t2a = s1_t1,s1_t2,c1_t1,c1_t2
sV_t1m,sV_t2m,cV_t1m,cV_t2m = s1_t1,s1_t2,c1_t1,c1_t2
# record each time t, hidden states, for later temporal attention after text encoding
hidden_array_1a = []
hidden_array_2a = []
hidden_array_1m = []
hidden_array_2m = []
for i in xrange(nImg):
if self.useSpatial:
input_frame = feature[i:i+1]
feat_att = self.SpAtt(input_frame, s1_t)
else:
feat_att_m = feature[i:i+1,0,0,:4096]
feat_att_a = feature[i:i+1,0,0,4096:]
# lstm
sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
sV_t1a_vec = sV_t1a.view(sV_t1a.size(0),1,sV_t1a.size(1))
sV_t2a_vec = sV_t2a.view(sV_t2a.size(0),1,sV_t2a.size(1))
hidden_array_1a.append(sV_t1a_vec)
hidden_array_2a.append(sV_t2a_vec)
sV_t1m_vec = sV_t1m.view(sV_t1m.size(0),1,sV_t1m.size(1))
sV_t2m_vec = sV_t2m.view(sV_t2m.size(0),1,sV_t2m.size(1))
hidden_array_1m.append(sV_t1m_vec)
hidden_array_2m.append(sV_t2m_vec)
sV_l1a = torch.cat(hidden_array_1a, dim=1)
sV_l2a = torch.cat(hidden_array_2a, dim=1)
sV_l1m = torch.cat(hidden_array_1m, dim=1)
sV_l2m = torch.cat(hidden_array_2m, dim=1)
sV_lla = torch.cat((sV_l1a,sV_l2a), dim=2)
sV_llm = torch.cat((sV_l1m,sV_l2m), dim=2)
#############################
# run text encoder second time
#############################
sT_t1,sT_t2,cT_t1,cT_t2 = self.init_hiddens()
sT_t1,sT_t2 = sV_t1a+sV_t1m, sV_t2a+sV_t2m
hidden_array_3 = []
for i in xrange(nQuestionWords):
input_question = features_questions[j,n_cand,i:i+1]
sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
hidden_array_3.append(sT_t2)
#print('Text encoding One size', sT_t1.size(), sT_t2.size())
# here sT_t1, sT_t2 is the last hidden
sT_t = torch.cat( (sT_t1,sT_t2), dim=1) # should be of size (1,1024)
#####################
# temporal attention
#####################
vid_att_a = self.TpAtt_a(sV_lla, sT_t)
vid_att_m = self.TpAtt_m(sV_llm, sT_t)
################
# stack memory
################
sT_rl = torch.cat(hidden_array_3, dim=0)
memory_ram_vid = self.mrm_vid(sV_l2a[0,:,:], sV_l2m[0,:,:], nImg)
memory_ram_txt = self.mrm_txt(sT_rl, nQuestionWords)
if self.mm_version==1:
svt_tmp = torch.cat((sV_t2a,sV_t2m),dim=1)
smq = self.mm_module_v1(svt_tmp,memory_ram_vid,memory_ram_txt,self.iter_num)
elif self.mm_version==2:
smq = self.mm_module_v2(memory_ram_vid,memory_ram_txt)
#########################
# decode the final output
#########################
final_embed_a = torch.tanh( self.linear_decoder_att_a(vid_att_a) )
final_embed_m = torch.tanh( self.linear_decoder_att_m(vid_att_m) )
final_embed_2 = torch.tanh( self.linear_decoder_mem(smq) )
final_embed = torch.cat([final_embed_a,final_embed_m,final_embed_2],dim=1)
output = self.linear_decoder_count_2(final_embed)
outputs_j.append(output)
# output is the score of each multiple choice
outputs_j = torch.cat(outputs_j,1)
#print('Output_j size', outputs_j.size()) # (1,5)
outputs.append(outputs_j)
# for evaluate accuracy, find the max one
_,mx_idx = torch.max(outputs_j,1)
predictions.append(mx_idx)
#print(outputs_j,mx_idx)
outputs = torch.cat(outputs, 0)
predictions = torch.cat(predictions, 0)
return outputs, answers, predictions
def forward_frameqa(self, data_dict):
video_features, numImgs = data_dict['video_features'], data_dict['video_lengths'],
questions, question_lengths = data_dict['question_words'], data_dict['question_lengths']
answers = data_dict['answers']
outputs = []
predictions = []
bsize = len(questions)
batch_size = len(questions) # batch size has to be 1
cnt = 0
features_questions = self.embed(questions)
# (64, 35, 300)
#print('text feature size', features_questions.size())
for j in range(batch_size):
nImg = numImgs[j]
nQuestionWords = question_lengths[j]
################################
# slice the input image features
################################
feature = video_features[j,video_features.size(1)-nImg:]
#print('current video feature size', feature.size())
#############################
# run text encoder first time
#############################
s1_t1,s1_t2,c1_t1,c1_t2 = self.init_hiddens()
for i in xrange(nQuestionWords):
input_question = features_questions[j,i:i+1]
s1_t1, c1_t1 = self.lstm_text_1(input_question, (s1_t1, c1_t1))
s1_t2, c1_t2 = self.lstm_text_2(s1_t1, (s1_t2, c1_t2))
# here s1_t1, s1_t2 is the last hidden
s1_t = torch.cat( (s1_t1,s1_t2), dim=1) # should be of size (1,1024)
###########################################
# run video encoder with spatial attention
###########################################
sV_t1a,sV_t2a,cV_t1a,cV_t2a = s1_t1,s1_t2,c1_t1,c1_t2
sV_t1m,sV_t2m,cV_t1m,cV_t2m = s1_t1,s1_t2,c1_t1,c1_t2
# record each time t, hidden states, for later temporal attention after text encoding
hidden_array_1a = []
hidden_array_2a = []
hidden_array_1m = []
hidden_array_2m = []
for i in xrange(nImg):
if self.useSpatial:
input_frame = feature[i:i+1]
feat_att = self.SpAtt(input_frame, s1_t)
else:
feat_att_m = feature[i:i+1,0,0,:4096]
feat_att_a = feature[i:i+1,0,0,4096:]
sV_t1m, cV_t1m = self.lstm_video_1m(feat_att_m, (sV_t1m, cV_t1m))
sV_t2m, cV_t2m = self.lstm_video_2m(sV_t1m, (sV_t2m, cV_t2m))
sV_t1a, cV_t1a = self.lstm_video_1a(feat_att_a, (sV_t1a, cV_t1a))
sV_t2a, cV_t2a = self.lstm_video_2a(sV_t1a, (sV_t2a, cV_t2a))
sV_t1a_vec = sV_t1a.view(sV_t1a.size(0),1,sV_t1a.size(1))
sV_t2a_vec = sV_t2a.view(sV_t2a.size(0),1,sV_t2a.size(1))
hidden_array_1a.append(sV_t1a_vec)
hidden_array_2a.append(sV_t2a_vec)
sV_t1m_vec = sV_t1m.view(sV_t1m.size(0),1,sV_t1m.size(1))
sV_t2m_vec = sV_t2m.view(sV_t2m.size(0),1,sV_t2m.size(1))
hidden_array_1m.append(sV_t1m_vec)
hidden_array_2m.append(sV_t2m_vec)
sV_l1a = torch.cat(hidden_array_1a, dim=1)
sV_l2a = torch.cat(hidden_array_2a, dim=1)
sV_l1m = torch.cat(hidden_array_1m, dim=1)
sV_l2m = torch.cat(hidden_array_2m, dim=1)
sV_lla = torch.cat((sV_l1a,sV_l2a), dim=2)
sV_llm = torch.cat((sV_l1m,sV_l2m), dim=2)
#############################
# run text encoder second time
#############################
sT_t1,sT_t2,cT_t1,cT_t2 = self.init_hiddens()
sT_t1,sT_t2 = sV_t1a+sV_t1m, sV_t2a+sV_t2m
hidden_array_3 = []
for i in xrange(nQuestionWords):
input_question = features_questions[j,i:i+1]
sT_t1, cT_t1 = self.lstm_text_1(input_question, (sT_t1, cT_t1))
sT_t2, cT_t2 = self.lstm_text_2(sT_t1, (sT_t2, cT_t2))
hidden_array_3.append(sT_t2)
#print('Text encoding One size', sT_t1.size(), sT_t2.size())
# here sT_t1, sT_t2 is the last hidden
sT_t = torch.cat( (sT_t1,sT_t2), dim=1) # should be of size (1,1024)
#####################
# temporal attention
#####################
vid_att_a = self.TpAtt_a(sV_lla, sT_t)
vid_att_m = self.TpAtt_m(sV_llm, sT_t)
################
# ram memory
################
sT_rl = torch.cat(hidden_array_3, dim=0)
memory_ram_vid = self.mrm_vid(sV_l2a[0,:,:], sV_l2m[0,:,:], nImg)
memory_ram_txt = self.mrm_txt(sT_rl, nQuestionWords)
if self.mm_version==1:
svt_tmp = torch.cat((sV_t2a,sV_t2m),dim=1)
smq = self.mm_module_v1(svt_tmp,memory_ram_vid,memory_ram_txt,self.iter_num)
elif self.mm_version==2:
smq = self.mm_module_v2(memory_ram_vid,memory_ram_txt)
#########################
# decode the final output
#########################
final_embed_a = torch.tanh( self.linear_decoder_att_a(vid_att_a) )
final_embed_m = torch.tanh( self.linear_decoder_att_m(vid_att_m) )
final_embed_2 = torch.tanh( self.linear_decoder_mem(smq) )
final_embed = torch.cat([final_embed_a,final_embed_m,final_embed_2],dim=1)
output = self.linear_decoder_count_2(final_embed)
#print('Output size', output.size()) # (1,5)
outputs.append(output)
_,mx_idx = torch.max(output,1)
predictions.append(mx_idx)
#print(output,mx_idx)
outputs = torch.cat(outputs, 0)
#targets = torch.cat(targets, 0)
predictions = torch.cat(predictions, 0)
#print(predictions.size())
return outputs, answers[:,0], predictions
def accuracy(self, logits, targets):
correct = torch.sum(logits.eq(targets)).float()
return correct * 100.0 / targets.size(0)
| 42.688265
| 162
| 0.543619
| 12,140
| 94,213
| 3.922735
| 0.037397
| 0.055437
| 0.021167
| 0.023939
| 0.933203
| 0.925098
| 0.91569
| 0.903973
| 0.898072
| 0.891248
| 0
| 0.043203
| 0.321675
| 94,213
| 2,206
| 163
| 42.707616
| 0.701973
| 0.137635
| 0
| 0.885434
| 0
| 0
| 0.009785
| 0.000533
| 0
| 0
| 0
| 0
| 0.001637
| 1
| 0.040098
| false
| 0
| 0.006547
| 0
| 0.079378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a811aa16a0003bd23aa04d63aea6cf7ee0a60ee
| 21,699
|
py
|
Python
|
demo/python/connectapi_demo/demo.py
|
ConnectAPI/AutoGeneratedSDKs
|
9f8a6882c34b55daa53b87635ef0226cd83beadf
|
[
"MIT"
] | null | null | null |
demo/python/connectapi_demo/demo.py
|
ConnectAPI/AutoGeneratedSDKs
|
9f8a6882c34b55daa53b87635ef0226cd83beadf
|
[
"MIT"
] | null | null | null |
demo/python/connectapi_demo/demo.py
|
ConnectAPI/AutoGeneratedSDKs
|
9f8a6882c34b55daa53b87635ef0226cd83beadf
|
[
"MIT"
] | null | null | null |
"""
Auto generated file, DO NOT EDIT!
"""
from typing import List, Any, Union
from .schemas import (
BodyFileOptional,
BodyFile,
BodyFormOptional,
BodyForm,
HTTPValidationError,
Nested,
TestIn,
ValidationError,
)
class Demo:
def __init__(self, client):
self.client = client
self.service_prefix = '/demo'
def sum_two_numbers(self, a: int, b: int, ):
path = '/math/sum'.format()
query = {'a': a, 'b': b, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def multiply_two_numbers(self, a: int, b: int, ):
path = '/math/mul'.format()
query = {'a': a, 'b': b, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def random_int_from_range(self, a: int, b: int, ):
path = '/random/range'.format()
query = {'a': a, 'b': b, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def random_string(self, n: int, ):
path = '/random/string'.format()
query = {'n': n, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def get_test(self, ):
path = '/test/get_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def post_test(self, ):
path = '/test/post_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def put_test(self, ):
path = '/test/put_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('put', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def delete_test_test(self, ):
path = '/test/delete_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('delete', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def options_test(self, ):
path = '/test/options_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('options', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def head_test(self, ):
path = '/test/head_test'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('head', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def query_test(self, a: int, b: str, c: bool, d: float, ):
path = '/test/query_test'.format()
query = {'a': a, 'b': b, 'c': c, 'd': d, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def query_optional_test(self, a: int = None, b: str = None, c: bool = None, d: float = None, ):
path = '/test/query_optional_test'.format()
query = {'a': a, 'b': b, 'c': c, 'd': d, }
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def body_test(self, body: TestIn, ):
path = '/test/body_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict()
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def body_optional_test(self, body: TestIn = None):
path = '/test/body_optional_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict() if body is not None else {}
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def headers_test(self, a: str, ):
path = '/test/headers_test'.format()
query = {}
headers = {'a': a, }
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def headers_optional_test(self, a: str = None, ):
path = '/test/headers_optional_test'.format()
query = {}
headers = {'a': a, }
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def form_test(self, body: BodyForm, ):
path = '/test/form_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict()
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/x-www-form-urlencoded")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def form_optional_test(self, body: BodyFormOptional = None):
path = '/test/form_optional_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict() if body is not None else {}
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/x-www-form-urlencoded")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def cookies_test(self, a: str, ):
path = '/test/cookies_test'.format()
query = {}
headers = {}
cookies = {'a': a, }
body = {}
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def cookies_optional_test(self, a: str = None, ):
path = '/test/cookies_optional_test'.format()
query = {}
headers = {}
cookies = {'a': a, }
body = {}
response = self.client.request('post', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def file_test(self, body: BodyFile, ):
path = '/test/file_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict()
response = self.client.request('put', self.service_prefix, path, query, body, headers, cookies, content_type="multipart/form-data")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def file_optional_test(self, body: BodyFileOptional = None):
path = '/test/file_optional_test'.format()
query = {}
headers = {}
cookies = {}
body = body.to_dict() if body is not None else {}
response = self.client.request('put', self.service_prefix, path, query, body, headers, cookies, content_type="multipart/form-data")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def path_test(self, a: str, b: int, c: float, ):
path = '/test/path_test/{a}/{b}/{c}'.format(a=a, b=b, c=c, )
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('put', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
if response.status_code == 422:
"""Validation Error"""
if response.headers.get("Content-Type", None) == "application/json":
return HTTPValidationError.from_dict(response.json())
return response
def r_test_str(self, ):
path = '/test/r_test_str'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_int(self, ):
path = '/test/r_test_int'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_none(self, ):
path = '/test/r_test_none'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_float(self, ):
path = '/test/r_test_float'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_array(self, ):
path = '/test/r_test_array'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_dict(self, ):
path = '/test/r_test_dict'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 200:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_no_200(self, ):
path = '/test/r_test_no_200'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 201:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_3xx(self, ):
path = '/test/r_test_3xx'.format()
query = {}
headers = {}
cookies = {}
body = {}
response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
if response.status_code == 300:
"""Successful Response"""
if response.headers.get("Content-Type", None) == "application/json":
return response.json()
return response
def r_test_redirect(self):
    """GET /test/r_test_redirect.

    Returns:
        The decoded JSON body when the (post-redirect) answer is 200
        with a JSON content type; otherwise the raw response object.
    """
    # Fix: dropped a no-op ``.format()`` on a literal with no placeholders,
    # and replaced a bare-string statement with a real comment.
    path = '/test/r_test_redirect'
    query = {}
    headers = {}
    cookies = {}
    body = {}
    response = self.client.request('get', self.service_prefix, path, query, body, headers, cookies, content_type="application/json")
    if response.status_code == 200:
        # Successful Response
        if response.headers.get("Content-Type", None) == "application/json":
            return response.json()
    return response
| 40.332714
| 154
| 0.567952
| 2,231
| 21,699
| 5.417302
| 0.042134
| 0.081086
| 0.095317
| 0.081086
| 0.917094
| 0.903773
| 0.897154
| 0.89674
| 0.891445
| 0.88648
| 0
| 0.010144
| 0.29582
| 21,699
| 537
| 155
| 40.407821
| 0.780825
| 0.001521
| 0
| 0.796651
| 1
| 0
| 0.128393
| 0.012937
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.004785
| 0
| 0.279904
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6aa39996602daf28428152f15366826d0b22562f
| 12,559
|
py
|
Python
|
pynos/versions/base/yang/brocade_hardware.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 12
|
2015-09-21T23:56:09.000Z
|
2018-03-30T04:35:32.000Z
|
pynos/versions/base/yang/brocade_hardware.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 10
|
2016-09-15T19:03:27.000Z
|
2017-07-17T23:38:01.000Z
|
pynos/versions/base/yang/brocade_hardware.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 6
|
2015-08-14T08:05:23.000Z
|
2022-02-03T15:33:54.000Z
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_hardware(object):
    """Auto generated class.

    Builders for NETCONF ``<config>`` payloads under the
    ``urn:brocade.com:mgmt:brocade-hardware`` YANG module.  Each builder
    assembles an ElementTree document and hands it to a callback
    (supplied at construction time or per call), returning whatever the
    callback returns.

    NOTE(review): the generated original defined every method twice
    with byte-identical bodies; the second definitions were dead code
    (they merely rebound the attribute names) and have been removed.
    Locals shadowing the builtins ``id``/``type`` were renamed.
    """

    def __init__(self, **kwargs):
        """Capture the default callback used to ship generated config.

        Keyword Args:
            callback: callable invoked with the generated ``ET.Element``.
        """
        self._callback = kwargs.pop('callback')

    def hardware_connector_name(self, **kwargs):
        """Auto Generated Code: config for hardware/connector/name.

        Keyword Args:
            name: connector name (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        connector = ET.SubElement(hardware, "connector")
        name = ET.SubElement(connector, "name")
        name.text = kwargs.pop('name')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_connector_sfp_breakout(self, **kwargs):
        """Auto Generated Code: config for hardware/connector/sfp/breakout.

        Keyword Args:
            name: connector name key (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        connector = ET.SubElement(hardware, "connector")
        name_key = ET.SubElement(connector, "name")
        name_key.text = kwargs.pop('name')
        sfp = ET.SubElement(connector, "sfp")
        ET.SubElement(sfp, "breakout")  # empty marker element, carries no value
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_port_group_name(self, **kwargs):
        """Auto Generated Code: config for hardware/port-group/name.

        Keyword Args:
            name: port-group name (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        port_group = ET.SubElement(hardware, "port-group")
        name = ET.SubElement(port_group, "name")
        name.text = kwargs.pop('name')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_port_group_mode_performance(self, **kwargs):
        """Auto Generated Code: config for hardware/port-group/mode/performance.

        Keyword Args:
            name: port-group name key (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        port_group = ET.SubElement(hardware, "port-group")
        name_key = ET.SubElement(port_group, "name")
        name_key.text = kwargs.pop('name')
        mode = ET.SubElement(port_group, "mode")
        ET.SubElement(mode, "performance")  # empty marker element, carries no value
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_connector_group_id(self, **kwargs):
        """Auto Generated Code: config for hardware/connector-group/id.

        Keyword Args:
            id: connector-group id (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        connector_group = ET.SubElement(hardware, "connector-group")
        id_elem = ET.SubElement(connector_group, "id")  # renamed: don't shadow builtin id()
        id_elem.text = kwargs.pop('id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_connector_group_speed(self, **kwargs):
        """Auto Generated Code: config for hardware/connector-group/speed.

        Keyword Args:
            id: connector-group id key (required).
            speed: speed value (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        connector_group = ET.SubElement(hardware, "connector-group")
        id_key = ET.SubElement(connector_group, "id")
        id_key.text = kwargs.pop('id')
        speed = ET.SubElement(connector_group, "speed")
        speed.text = kwargs.pop('speed')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_flexport_id(self, **kwargs):
        """Auto Generated Code: config for hardware/flexport/id.

        Keyword Args:
            id: flexport id (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        flexport = ET.SubElement(hardware, "flexport")
        id_elem = ET.SubElement(flexport, "id")  # renamed: don't shadow builtin id()
        id_elem.text = kwargs.pop('id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_flexport_flexport_type_type(self, **kwargs):
        """Auto Generated Code: config for hardware/flexport/flexport_type/type.

        Keyword Args:
            id: flexport id key (required).
            type: flexport type value (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        flexport = ET.SubElement(hardware, "flexport")
        id_key = ET.SubElement(flexport, "id")
        id_key.text = kwargs.pop('id')
        flexport_type = ET.SubElement(flexport, "flexport_type")
        type_elem = ET.SubElement(flexport_type, "type")  # renamed: don't shadow builtin type()
        type_elem.text = kwargs.pop('type')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_flexport_flexport_type_instance(self, **kwargs):
        """Auto Generated Code: config for hardware/flexport/flexport_type/instance.

        Keyword Args:
            id: flexport id key (required).
            instance: instance value (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        flexport = ET.SubElement(hardware, "flexport")
        id_key = ET.SubElement(flexport, "id")
        id_key.text = kwargs.pop('id')
        flexport_type = ET.SubElement(flexport, "flexport_type")
        instance = ET.SubElement(flexport_type, "instance")
        instance.text = kwargs.pop('instance')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def hardware_flexport_flexport_type_skip_deconfig(self, **kwargs):
        """Auto Generated Code: config for hardware/flexport/flexport_type/skip_deconfig.

        Keyword Args:
            id: flexport id key (required).
            callback: per-call override of the instance callback.
        """
        config = ET.Element("config")
        hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
        flexport = ET.SubElement(hardware, "flexport")
        id_key = ET.SubElement(flexport, "id")
        id_key.text = kwargs.pop('id')
        flexport_type = ET.SubElement(flexport, "flexport_type")
        ET.SubElement(flexport_type, "skip_deconfig")  # empty marker element, carries no value
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_flexports_output_flexport_list_port_id(self, **kwargs):
        """Auto Generated Code: RPC body get_flexports/output/flexport-list/port-id.

        Unlike the builders above, the root element here is
        ``<get_flexports>``, not ``<config>``.

        Keyword Args:
            port_id: port id value (required).
            callback: per-call override of the instance callback.
        """
        # Fix: the original created ET.Element("config") and immediately
        # overwrote the binding — the dead allocation is removed.
        config = ET.Element("get_flexports")
        output = ET.SubElement(config, "output")
        flexport_list = ET.SubElement(output, "flexport-list")
        port_id = ET.SubElement(flexport_list, "port-id")
        port_id.text = kwargs.pop('port_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
| 40.908795
| 99
| 0.626961
| 1,372
| 12,559
| 5.600583
| 0.037901
| 0.137428
| 0.047371
| 0.074831
| 0.982561
| 0.982561
| 0.982561
| 0.982561
| 0.982561
| 0.982561
| 0
| 0
| 0.240545
| 12,559
| 307
| 100
| 40.908795
| 0.80562
| 0.054543
| 0
| 0.981132
| 1
| 0
| 0.156816
| 0.062931
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108491
| false
| 0
| 0.004717
| 0
| 0.221698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6ac9cf0e1a932a4106105683dc73005f0b5e9da7
| 21,659
|
py
|
Python
|
pyteal/ast/asset_test.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | null | null | null |
pyteal/ast/asset_test.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | 1
|
2022-03-04T14:57:57.000Z
|
2022-03-04T14:57:57.000Z
|
pyteal/ast/asset_test.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | null | null | null |
import pytest
import pyteal as pt
# Shared compile options pinned to the TEAL versions exercised below.
# NOTE(review): the bare CompileOptions() presumably defaults to the
# minimum/v2 AVM target, matching the "teal2" name — confirm against
# pyteal's CompileOptions default.
teal2Options = pt.CompileOptions()
teal4Options = pt.CompileOptions(version=4)
teal5Options = pt.CompileOptions(version=5)
def test_asset_holding_balance():
    """AssetHolding.balance on slot-style args compiles to asset_holding_get "AssetBalance"."""
    holder, asset = pt.Int(0), pt.Int(17)
    expr = pt.AssetHolding.balance(holder, asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(holder, pt.Op.int, 0),
            pt.TealOp(asset, pt.Op.int, 17),
            pt.TealOp(expr, pt.Op.asset_holding_get, "AssetBalance"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal2Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_holding_balance_direct_ref():
    """AssetHolding.balance on direct txn references compiles to asset_holding_get "AssetBalance"."""
    account, asset = pt.Txn.sender(), pt.Txn.assets[17]
    expr = pt.AssetHolding.balance(account, asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(account, pt.Op.txn, "Sender"),
            pt.TealOp(asset, pt.Op.txna, "Assets", 17),
            pt.TealOp(expr, pt.Op.asset_holding_get, "AssetBalance"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal4Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_holding_balance_invalid():
    """Wrongly-typed account or asset arguments must raise TealTypeError."""
    bad_arg_pairs = [
        (pt.Txn.sender(), pt.Bytes("100")),  # asset index must be uint64
        (pt.Int(0), pt.Txn.receiver()),      # asset ref must not be bytes
    ]
    for account, asset in bad_arg_pairs:
        with pytest.raises(pt.TealTypeError):
            pt.AssetHolding.balance(account, asset)
def test_asset_holding_frozen():
    """AssetHolding.frozen on slot-style args compiles to asset_holding_get "AssetFrozen"."""
    holder, asset = pt.Int(0), pt.Int(17)
    expr = pt.AssetHolding.frozen(holder, asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(holder, pt.Op.int, 0),
            pt.TealOp(asset, pt.Op.int, 17),
            pt.TealOp(expr, pt.Op.asset_holding_get, "AssetFrozen"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal2Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_holding_frozen_direct_ref():
    """AssetHolding.frozen on direct txn references compiles to asset_holding_get "AssetFrozen"."""
    account, asset = pt.Txn.sender(), pt.Txn.assets[17]
    expr = pt.AssetHolding.frozen(account, asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(account, pt.Op.txn, "Sender"),
            pt.TealOp(asset, pt.Op.txna, "Assets", 17),
            pt.TealOp(expr, pt.Op.asset_holding_get, "AssetFrozen"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal4Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_holding_frozen_invalid():
    """Wrongly-typed account or asset arguments must raise TealTypeError."""
    bad_arg_pairs = [
        (pt.Txn.sender(), pt.Bytes("17")),  # asset index must be uint64
        (pt.Int(0), pt.Txn.receiver()),     # asset ref must not be bytes
    ]
    for account, asset in bad_arg_pairs:
        with pytest.raises(pt.TealTypeError):
            pt.AssetHolding.frozen(account, asset)
def test_asset_param_total():
    """AssetParam.total on a slot-style arg compiles to asset_params_get "AssetTotal"."""
    asset = pt.Int(0)
    expr = pt.AssetParam.total(asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(asset, pt.Op.int, 0),
            pt.TealOp(expr, pt.Op.asset_params_get, "AssetTotal"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal2Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_param_total_direct_ref():
    """AssetParam.total on a direct txn asset ref compiles to asset_params_get "AssetTotal"."""
    asset = pt.Txn.assets[0]
    expr = pt.AssetParam.total(asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.uint64

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(asset, pt.Op.txna, "Assets", 0),
            pt.TealOp(expr, pt.Op.asset_params_get, "AssetTotal"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal4Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_param_total_invalid():
    """A bytes-typed asset argument must raise TealTypeError."""
    bad_asset = pt.Txn.sender()
    with pytest.raises(pt.TealTypeError):
        pt.AssetParam.total(bad_asset)
def test_asset_param_decimals():
arg = pt.Int(0)
expr = pt.AssetParam.decimals(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.uint64
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetDecimals"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_decimals_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.decimals(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.uint64
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetDecimals"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_decimals_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.decimals(pt.Txn.sender())
def test_asset_param_default_frozen():
arg = pt.Int(0)
expr = pt.AssetParam.defaultFrozen(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.uint64
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetDefaultFrozen"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_default_frozen_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.defaultFrozen(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.uint64
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetDefaultFrozen"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_default_frozen_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.defaultFrozen(pt.Txn.sender())
def test_asset_param_unit_name():
arg = pt.Int(0)
expr = pt.AssetParam.unitName(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetUnitName"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_unit_name_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.unitName(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetUnitName"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_unit_name_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.unitName(pt.Txn.sender())
def test_asset_param_name():
arg = pt.Int(0)
expr = pt.AssetParam.name(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetName"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_name_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.name(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetName"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_name_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.name(pt.Txn.sender())
def test_asset_param_url():
arg = pt.Int(0)
expr = pt.AssetParam.url(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetURL"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_url_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.url(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetURL"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_url_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.url(pt.Txn.sender())
def test_asset_param_metadata_hash():
arg = pt.Int(0)
expr = pt.AssetParam.metadataHash(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetMetadataHash"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_metadata_hash_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.metadataHash(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetMetadataHash"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_metadata_hash_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.metadataHash(pt.Txn.sender())
def test_asset_param_manager():
arg = pt.Int(0)
expr = pt.AssetParam.manager(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetManager"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_manager_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.manager(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetManager"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_manager_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.manager(pt.Txn.sender())
def test_asset_param_reserve():
arg = pt.Int(2)
expr = pt.AssetParam.reserve(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 2),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetReserve"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_reserve_direct_ref():
arg = pt.Txn.assets[2]
expr = pt.AssetParam.reserve(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 2),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetReserve"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_reserve_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.reserve(pt.Txn.sender())
def test_asset_param_freeze():
arg = pt.Int(0)
expr = pt.AssetParam.freeze(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetFreeze"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_freeze_direct_ref():
arg = pt.Txn.assets[0]
expr = pt.AssetParam.freeze(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 0),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetFreeze"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_freeze_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.freeze(pt.Txn.sender())
def test_asset_param_clawback():
arg = pt.Int(1)
expr = pt.AssetParam.clawback(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.int, 1),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetClawback"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal2Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_clawback_direct_ref():
arg = pt.Txn.assets[1]
expr = pt.AssetParam.clawback(arg)
assert expr.type_of() == pt.TealType.none
assert expr.value().type_of() == pt.TealType.bytes
expected = pt.TealSimpleBlock(
[
pt.TealOp(arg, pt.Op.txna, "Assets", 1),
pt.TealOp(expr, pt.Op.asset_params_get, "AssetClawback"),
pt.TealOp(None, pt.Op.store, expr.slotOk),
pt.TealOp(None, pt.Op.store, expr.slotValue),
]
)
actual, _ = expr.__teal__(teal4Options)
actual.addIncoming()
actual = pt.TealBlock.NormalizeBlocks(actual)
with pt.TealComponent.Context.ignoreExprEquality():
assert actual == expected
def test_asset_param_clawback_invalid():
with pytest.raises(pt.TealTypeError):
pt.AssetParam.clawback(pt.Txn.sender())
def test_asset_param_creator_valid():
    """AssetParam.creator (TEAL v5+) compiles to asset_params_get "AssetCreator"."""
    asset = pt.Int(1)
    expr = pt.AssetParam.creator(asset)

    assert expr.type_of() == pt.TealType.none
    assert expr.value().type_of() == pt.TealType.bytes

    want = pt.TealSimpleBlock(
        [
            pt.TealOp(asset, pt.Op.int, 1),
            pt.TealOp(expr, pt.Op.asset_params_get, "AssetCreator"),
            pt.TealOp(None, pt.Op.store, expr.slotOk),
            pt.TealOp(None, pt.Op.store, expr.slotValue),
        ]
    )

    got, _ = expr.__teal__(teal5Options)
    got.addIncoming()
    got = pt.TealBlock.NormalizeBlocks(got)
    with pt.TealComponent.Context.ignoreExprEquality():
        assert got == want
def test_asset_param_creator_invalid():
    """A bytes-typed asset argument must raise TealTypeError."""
    bad_asset = pt.Txn.sender()
    with pytest.raises(pt.TealTypeError):
        pt.AssetParam.creator(bad_asset)
| 30.54866
| 74
| 0.641212
| 2,654
| 21,659
| 5.076112
| 0.036549
| 0.066508
| 0.032067
| 0.064133
| 0.979513
| 0.969567
| 0.963999
| 0.934531
| 0.894596
| 0.866983
| 0
| 0.008391
| 0.224156
| 21,659
| 708
| 75
| 30.591808
| 0.793323
| 0
| 0
| 0.715342
| 0
| 0
| 0.019622
| 0
| 0
| 0
| 0
| 0
| 0.149723
| 1
| 0.075786
| false
| 0
| 0.003697
| 0
| 0.079482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ae356c0410b3d06736810b1ce9dfb23119df59f
| 47,879
|
py
|
Python
|
python/src/chirpstack_api/as_pb/integration/integration_pb2.py
|
hoellejal/chirpstack-api
|
cadfed3f9f2f7947e31c45e3aa849ce4a4d55629
|
[
"MIT"
] | null | null | null |
python/src/chirpstack_api/as_pb/integration/integration_pb2.py
|
hoellejal/chirpstack-api
|
cadfed3f9f2f7947e31c45e3aa849ce4a4d55629
|
[
"MIT"
] | null | null | null |
python/src/chirpstack_api/as_pb/integration/integration_pb2.py
|
hoellejal/chirpstack-api
|
cadfed3f9f2f7947e31c45e3aa849ce4a4d55629
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chirpstack-api/as_pb/integration/integration.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from chirpstack_api.common import common_pb2 as chirpstack__api_dot_common_dot_common__pb2
from chirpstack_api.gw import gw_pb2 as chirpstack__api_dot_gw_dot_gw__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='chirpstack-api/as_pb/integration/integration.proto',
package='integration',
syntax='proto3',
serialized_options=b'Z6github.com/brocaar/chirpstack-api/go/v3/as/integration',
serialized_pb=b'\n2chirpstack-api/as_pb/integration/integration.proto\x12\x0bintegration\x1a\"chirpstack-api/common/common.proto\x1a\x1a\x63hirpstack-api/gw/gw.proto\"\x88\x03\n\x0bUplinkEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12!\n\x07rx_info\x18\x05 \x03(\x0b\x32\x10.gw.UplinkRXInfo\x12!\n\x07tx_info\x18\x06 \x01(\x0b\x32\x10.gw.UplinkTXInfo\x12\x0b\n\x03\x61\x64r\x18\x07 \x01(\x08\x12\n\n\x02\x64r\x18\x08 \x01(\r\x12\r\n\x05\x66_cnt\x18\t \x01(\r\x12\x0e\n\x06\x66_port\x18\n \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x0b \x01(\x0c\x12\x1f\n\x0bobject_json\x18\x0c \x01(\tR\nobjectJSON\x12\x30\n\x04tags\x18\r \x03(\x0b\x32\".integration.UplinkEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xbb\x02\n\tJoinEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12\x10\n\x08\x64\x65v_addr\x18\x05 \x01(\x0c\x12!\n\x07rx_info\x18\x06 \x03(\x0b\x32\x10.gw.UplinkRXInfo\x12!\n\x07tx_info\x18\x07 \x01(\x0b\x32\x10.gw.UplinkTXInfo\x12\n\n\x02\x64r\x18\x08 \x01(\r\x12.\n\x04tags\x18\t \x03(\x0b\x32 .integration.JoinEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xfa\x01\n\x08\x41\x63kEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12\x14\n\x0c\x61\x63knowledged\x18\x05 \x01(\x08\x12\r\n\x05\x66_cnt\x18\x06 \x01(\r\x12-\n\x04tags\x18\x07 
\x03(\x0b\x32\x1f.integration.AckEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe8\x01\n\nTxAckEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12\r\n\x05\x66_cnt\x18\x05 \x01(\r\x12/\n\x04tags\x18\x06 \x03(\x0b\x32!.integration.TxAckEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x9d\x02\n\nErrorEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12$\n\x04type\x18\x05 \x01(\x0e\x32\x16.integration.ErrorType\x12\r\n\x05\x65rror\x18\x06 \x01(\t\x12\r\n\x05\x66_cnt\x18\x07 \x01(\r\x12/\n\x04tags\x18\x08 \x03(\x0b\x32!.integration.ErrorEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc4\x02\n\x0bStatusEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\x0cR\x06\x64\x65vEUI\x12\x0e\n\x06margin\x18\x05 \x01(\r\x12\x1d\n\x15\x65xternal_power_source\x18\x06 \x01(\x08\x12!\n\x19\x62\x61ttery_level_unavailable\x18\x07 \x01(\x08\x12\x15\n\rbattery_level\x18\x08 \x01(\x02\x12\x30\n\x04tags\x18\t \x03(\x0b\x32\".integration.StatusEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa2\x02\n\rLocationEvent\x12%\n\x0e\x61pplication_id\x18\x01 \x01(\x04R\rapplicationID\x12\x18\n\x10\x61pplication_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x12\x17\n\x07\x64\x65v_eui\x18\x04 
\x01(\x0cR\x06\x64\x65vEUI\x12\"\n\x08location\x18\x05 \x01(\x0b\x32\x10.common.Location\x12\x32\n\x04tags\x18\x06 \x03(\x0b\x32$.integration.LocationEvent.TagsEntry\x12\x1d\n\nuplink_ids\x18\x07 \x03(\x0cR\tuplinkIDs\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*\xd3\x01\n\tErrorType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x19\n\x15\x44OWNLINK_PAYLOAD_SIZE\x10\x01\x12\x11\n\rDOWNLINK_FCNT\x10\x02\x12\x10\n\x0cUPLINK_CODEC\x10\x03\x12\x12\n\x0e\x44OWNLINK_CODEC\x10\x04\x12\x08\n\x04OTAA\x10\x05\x12\x15\n\x11UPLINK_FCNT_RESET\x10\x06\x12\x0e\n\nUPLINK_MIC\x10\x07\x12\x1e\n\x1aUPLINK_FCNT_RETRANSMISSION\x10\x08\x12\x14\n\x10\x44OWNLINK_GATEWAY\x10\tB8Z6github.com/brocaar/chirpstack-api/go/v3/as/integrationb\x06proto3'
,
dependencies=[chirpstack__api_dot_common_dot_common__pb2.DESCRIPTOR,chirpstack__api_dot_gw_dot_gw__pb2.DESCRIPTOR,])
# Machine-generated descriptor table for the proto3 enum `integration.ErrorType`.
# NOTE(review): this module is protoc output ("DO NOT EDIT"); serialized_start/
# serialized_end are byte offsets into DESCRIPTOR.serialized_pb, so changes must
# come from regenerating integration.proto, never from hand edits.
_ERRORTYPE = _descriptor.EnumDescriptor(
  name='ErrorType',
  full_name='integration.ErrorType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DOWNLINK_PAYLOAD_SIZE', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DOWNLINK_FCNT', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPLINK_CODEC', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DOWNLINK_CODEC', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='OTAA', index=5, number=5,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPLINK_FCNT_RESET', index=6, number=6,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPLINK_MIC', index=7, number=7,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPLINK_FCNT_RETRANSMISSION', index=8, number=8,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DOWNLINK_GATEWAY', index=9, number=9,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2241,
  serialized_end=2452,
)
# Register the enum with the default symbol database and expose the wrapper plus
# each enum value as a module-level constant, mirroring the top-level
# `enum ErrorType` in integration.proto (protoc output -- "DO NOT EDIT").
_sym_db.RegisterEnumDescriptor(_ERRORTYPE)

ErrorType = enum_type_wrapper.EnumTypeWrapper(_ERRORTYPE)
UNKNOWN = 0
DOWNLINK_PAYLOAD_SIZE = 1
DOWNLINK_FCNT = 2
UPLINK_CODEC = 3
DOWNLINK_CODEC = 4
OTAA = 5
UPLINK_FCNT_RESET = 6
UPLINK_MIC = 7
UPLINK_FCNT_RETRANSMISSION = 8
DOWNLINK_GATEWAY = 9
# Machine-generated descriptor for the synthetic map-entry message backing the
# `map<string, string> tags` field of UplinkEvent (protoc output -- "DO NOT
# EDIT"; regenerate from integration.proto rather than editing by hand).
_UPLINKEVENT_TAGSENTRY = _descriptor.Descriptor(
  name='TagsEntry',
  full_name='integration.UplinkEvent.TagsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='integration.UplinkEvent.TagsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='integration.UplinkEvent.TagsEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # b'8\001' encodes the MessageOptions map_entry=true flag for this entry type.
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=481,
  serialized_end=524,
)
_UPLINKEVENT = _descriptor.Descriptor(
name='UplinkEvent',
full_name='integration.UplinkEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.UplinkEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.UplinkEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.UplinkEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.UplinkEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rx_info', full_name='integration.UplinkEvent.rx_info', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tx_info', full_name='integration.UplinkEvent.tx_info', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='adr', full_name='integration.UplinkEvent.adr', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dr', full_name='integration.UplinkEvent.dr', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='integration.UplinkEvent.f_cnt', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f_port', full_name='integration.UplinkEvent.f_port', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='integration.UplinkEvent.data', index=10,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_json', full_name='integration.UplinkEvent.object_json', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectJSON', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.UplinkEvent.tags', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPLINKEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=524,
)
_JOINEVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.JoinEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.JoinEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.JoinEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_JOINEVENT = _descriptor.Descriptor(
name='JoinEvent',
full_name='integration.JoinEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.JoinEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.JoinEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.JoinEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.JoinEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_addr', full_name='integration.JoinEvent.dev_addr', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rx_info', full_name='integration.JoinEvent.rx_info', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tx_info', full_name='integration.JoinEvent.tx_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dr', full_name='integration.JoinEvent.dr', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.JoinEvent.tags', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOINEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=527,
serialized_end=842,
)
_ACKEVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.AckEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.AckEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.AckEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_ACKEVENT = _descriptor.Descriptor(
name='AckEvent',
full_name='integration.AckEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.AckEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.AckEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.AckEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.AckEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='acknowledged', full_name='integration.AckEvent.acknowledged', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='integration.AckEvent.f_cnt', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.AckEvent.tags', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ACKEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=845,
serialized_end=1095,
)
_TXACKEVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.TxAckEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.TxAckEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.TxAckEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_TXACKEVENT = _descriptor.Descriptor(
name='TxAckEvent',
full_name='integration.TxAckEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.TxAckEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.TxAckEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.TxAckEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.TxAckEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='integration.TxAckEvent.f_cnt', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.TxAckEvent.tags', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TXACKEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1098,
serialized_end=1330,
)
_ERROREVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.ErrorEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.ErrorEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.ErrorEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_ERROREVENT = _descriptor.Descriptor(
name='ErrorEvent',
full_name='integration.ErrorEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.ErrorEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.ErrorEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.ErrorEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.ErrorEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='integration.ErrorEvent.type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='integration.ErrorEvent.error', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='integration.ErrorEvent.f_cnt', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.ErrorEvent.tags', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ERROREVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1333,
serialized_end=1618,
)
_STATUSEVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.StatusEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.StatusEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.StatusEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_STATUSEVENT = _descriptor.Descriptor(
name='StatusEvent',
full_name='integration.StatusEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.StatusEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.StatusEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.StatusEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.StatusEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='margin', full_name='integration.StatusEvent.margin', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='external_power_source', full_name='integration.StatusEvent.external_power_source', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='battery_level_unavailable', full_name='integration.StatusEvent.battery_level_unavailable', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='battery_level', full_name='integration.StatusEvent.battery_level', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.StatusEvent.tags', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_STATUSEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1621,
serialized_end=1945,
)
_LOCATIONEVENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='integration.LocationEvent.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='integration.LocationEvent.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='integration.LocationEvent.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=524,
)
_LOCATIONEVENT = _descriptor.Descriptor(
name='LocationEvent',
full_name='integration.LocationEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='application_id', full_name='integration.LocationEvent.application_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='applicationID', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='application_name', full_name='integration.LocationEvent.application_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='integration.LocationEvent.device_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='integration.LocationEvent.dev_eui', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='location', full_name='integration.LocationEvent.location', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='integration.LocationEvent.tags', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uplink_ids', full_name='integration.LocationEvent.uplink_ids', index=6,
number=7, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='uplinkIDs', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LOCATIONEVENT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1948,
serialized_end=2238,
)
_UPLINKEVENT_TAGSENTRY.containing_type = _UPLINKEVENT
_UPLINKEVENT.fields_by_name['rx_info'].message_type = chirpstack__api_dot_gw_dot_gw__pb2._UPLINKRXINFO
_UPLINKEVENT.fields_by_name['tx_info'].message_type = chirpstack__api_dot_gw_dot_gw__pb2._UPLINKTXINFO
_UPLINKEVENT.fields_by_name['tags'].message_type = _UPLINKEVENT_TAGSENTRY
_JOINEVENT_TAGSENTRY.containing_type = _JOINEVENT
_JOINEVENT.fields_by_name['rx_info'].message_type = chirpstack__api_dot_gw_dot_gw__pb2._UPLINKRXINFO
_JOINEVENT.fields_by_name['tx_info'].message_type = chirpstack__api_dot_gw_dot_gw__pb2._UPLINKTXINFO
_JOINEVENT.fields_by_name['tags'].message_type = _JOINEVENT_TAGSENTRY
_ACKEVENT_TAGSENTRY.containing_type = _ACKEVENT
_ACKEVENT.fields_by_name['tags'].message_type = _ACKEVENT_TAGSENTRY
_TXACKEVENT_TAGSENTRY.containing_type = _TXACKEVENT
_TXACKEVENT.fields_by_name['tags'].message_type = _TXACKEVENT_TAGSENTRY
_ERROREVENT_TAGSENTRY.containing_type = _ERROREVENT
_ERROREVENT.fields_by_name['type'].enum_type = _ERRORTYPE
_ERROREVENT.fields_by_name['tags'].message_type = _ERROREVENT_TAGSENTRY
_STATUSEVENT_TAGSENTRY.containing_type = _STATUSEVENT
_STATUSEVENT.fields_by_name['tags'].message_type = _STATUSEVENT_TAGSENTRY
_LOCATIONEVENT_TAGSENTRY.containing_type = _LOCATIONEVENT
_LOCATIONEVENT.fields_by_name['location'].message_type = chirpstack__api_dot_common_dot_common__pb2._LOCATION
_LOCATIONEVENT.fields_by_name['tags'].message_type = _LOCATIONEVENT_TAGSENTRY
DESCRIPTOR.message_types_by_name['UplinkEvent'] = _UPLINKEVENT
DESCRIPTOR.message_types_by_name['JoinEvent'] = _JOINEVENT
DESCRIPTOR.message_types_by_name['AckEvent'] = _ACKEVENT
DESCRIPTOR.message_types_by_name['TxAckEvent'] = _TXACKEVENT
DESCRIPTOR.message_types_by_name['ErrorEvent'] = _ERROREVENT
DESCRIPTOR.message_types_by_name['StatusEvent'] = _STATUSEVENT
DESCRIPTOR.message_types_by_name['LocationEvent'] = _LOCATIONEVENT
DESCRIPTOR.enum_types_by_name['ErrorType'] = _ERRORTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UplinkEvent = _reflection.GeneratedProtocolMessageType('UplinkEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _UPLINKEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.UplinkEvent.TagsEntry)
})
,
'DESCRIPTOR' : _UPLINKEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.UplinkEvent)
})
_sym_db.RegisterMessage(UplinkEvent)
_sym_db.RegisterMessage(UplinkEvent.TagsEntry)
JoinEvent = _reflection.GeneratedProtocolMessageType('JoinEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _JOINEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.JoinEvent.TagsEntry)
})
,
'DESCRIPTOR' : _JOINEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.JoinEvent)
})
_sym_db.RegisterMessage(JoinEvent)
_sym_db.RegisterMessage(JoinEvent.TagsEntry)
AckEvent = _reflection.GeneratedProtocolMessageType('AckEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _ACKEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.AckEvent.TagsEntry)
})
,
'DESCRIPTOR' : _ACKEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.AckEvent)
})
_sym_db.RegisterMessage(AckEvent)
_sym_db.RegisterMessage(AckEvent.TagsEntry)
TxAckEvent = _reflection.GeneratedProtocolMessageType('TxAckEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _TXACKEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.TxAckEvent.TagsEntry)
})
,
'DESCRIPTOR' : _TXACKEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.TxAckEvent)
})
_sym_db.RegisterMessage(TxAckEvent)
_sym_db.RegisterMessage(TxAckEvent.TagsEntry)
ErrorEvent = _reflection.GeneratedProtocolMessageType('ErrorEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _ERROREVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.ErrorEvent.TagsEntry)
})
,
'DESCRIPTOR' : _ERROREVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.ErrorEvent)
})
_sym_db.RegisterMessage(ErrorEvent)
_sym_db.RegisterMessage(ErrorEvent.TagsEntry)
StatusEvent = _reflection.GeneratedProtocolMessageType('StatusEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _STATUSEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.StatusEvent.TagsEntry)
})
,
'DESCRIPTOR' : _STATUSEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.StatusEvent)
})
_sym_db.RegisterMessage(StatusEvent)
_sym_db.RegisterMessage(StatusEvent.TagsEntry)
LocationEvent = _reflection.GeneratedProtocolMessageType('LocationEvent', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _LOCATIONEVENT_TAGSENTRY,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.LocationEvent.TagsEntry)
})
,
'DESCRIPTOR' : _LOCATIONEVENT,
'__module__' : 'chirpstack_api.as_pb.integration.integration_pb2'
# @@protoc_insertion_point(class_scope:integration.LocationEvent)
})
_sym_db.RegisterMessage(LocationEvent)
_sym_db.RegisterMessage(LocationEvent.TagsEntry)
DESCRIPTOR._options = None
_UPLINKEVENT_TAGSENTRY._options = None
_JOINEVENT_TAGSENTRY._options = None
_ACKEVENT_TAGSENTRY._options = None
_TXACKEVENT_TAGSENTRY._options = None
_ERROREVENT_TAGSENTRY._options = None
_STATUSEVENT_TAGSENTRY._options = None
_LOCATIONEVENT_TAGSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 44.332407
| 4,656
| 0.742643
| 6,220
| 47,879
| 5.436174
| 0.044373
| 0.057729
| 0.056517
| 0.048147
| 0.827285
| 0.782036
| 0.771183
| 0.760388
| 0.738769
| 0.705054
| 0
| 0.044801
| 0.128679
| 47,879
| 1,079
| 4,657
| 44.373494
| 0.765713
| 0.023768
| 0
| 0.715696
| 1
| 0.000987
| 0.218874
| 0.176687
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00691
| 0
| 0.00691
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0a744a1105f1d5622dbfda63ee122f407322de6c
| 71,488
|
py
|
Python
|
tests/mesh/mesh_data.py
|
KyleVaughn/mocmg
|
228f1ab751c5f584b51cd047bd9b37e2252eb629
|
[
"MIT"
] | null | null | null |
tests/mesh/mesh_data.py
|
KyleVaughn/mocmg
|
228f1ab751c5f584b51cd047bd9b37e2252eb629
|
[
"MIT"
] | null | null | null |
tests/mesh/mesh_data.py
|
KyleVaughn/mocmg
|
228f1ab751c5f584b51cd047bd9b37e2252eb629
|
[
"MIT"
] | null | null | null |
"""The vertex, cell, cell set data for a variety of test meshes."""
import numpy as np
# linear triangle data
linear_triangle_vertices = {
1: np.array([-1.00000005e00, 1.06043306e-15, -5.00000000e-08]),
2: np.array([-1.0000001e00, -1.0000001e00, -1.0000000e-07]),
3: np.array([-1.05896103e-13, -1.00000005e00, -5.00000000e-08]),
4: np.array([1.0, 0.0, 0.0]),
5: np.array([1.0000001e00, -1.0000001e00, -1.0000000e-07]),
6: np.array([-1.0000001e00, 1.0000001e00, -1.0000000e-07]),
7: np.array([-1.08549196e-13, 1.00000005e00, -5.00000000e-08]),
8: np.array([1.0000001e00, 1.0000001e00, -1.0000000e-07]),
9: np.array([-0.70710678, -0.70710678, 0.0]),
10: np.array([0.70710678, -0.70710678, 0.0]),
11: np.array([-0.70710678, 0.70710678, 0.0]),
12: np.array([0.70710678, 0.70710678, 0.0]),
13: np.array([-0.26876582, 0.11132644, 0.0]),
14: np.array([0.36327769, -0.15047455, 0.0]),
15: np.array([0.19024025, 0.44768302, 0.0]),
16: np.array([-0.18203951, -0.45107992, 0.0]),
}
linear_triangle_cells = {
"triangle": {
1: np.array([1, 16, 13]),
2: np.array([9, 16, 1]),
3: np.array([12, 15, 4]),
4: np.array([13, 15, 11]),
5: np.array([11, 15, 7]),
6: np.array([10, 16, 3]),
7: np.array([4, 15, 14]),
8: np.array([14, 16, 10]),
9: np.array([1, 13, 11]),
10: np.array([4, 14, 10]),
11: np.array([7, 15, 12]),
12: np.array([3, 16, 9]),
13: np.array([14, 15, 13]),
14: np.array([13, 16, 14]),
15: np.array([2, 9, 1]),
16: np.array([2, 3, 9]),
17: np.array([10, 5, 4]),
18: np.array([3, 5, 10]),
19: np.array([1, 11, 6]),
20: np.array([6, 11, 7]),
21: np.array([12, 4, 8]),
22: np.array([7, 12, 8]),
},
}
linear_triangle_cell_sets = {
"DISK": np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
"MATERIAL_VOID": np.array([15, 16, 17, 18, 19, 20, 21, 22]),
}
# quadratic triangle mesh
quadratic_triangle_vertices = {
1: np.array([-1.00000005e00, 1.06043306e-15, -5.00000000e-08]),
2: np.array([-1.0000001e00, -1.0000001e00, -1.0000000e-07]),
3: np.array([-1.05896103e-13, -1.00000005e00, -5.00000000e-08]),
4: np.array([1.0, 0.0, 0.0]),
5: np.array([1.0000001e00, -1.0000001e00, -1.0000000e-07]),
6: np.array([-1.0000001e00, 1.0000001e00, -1.0000000e-07]),
7: np.array([-1.08549196e-13, 1.00000005e00, -5.00000000e-08]),
8: np.array([1.0000001e00, 1.0000001e00, -1.0000000e-07]),
9: np.array([-1.00000010e00, -5.00841281e-01, -1.00000000e-07]),
10: np.array([-0.70708298, -0.70713058, 0.0]),
11: np.array([-0.91703972, -0.39879588, 0.0]),
12: np.array([-0.39884673, -0.9170176, 0.0]),
13: np.array([-5.00840823e-01, -1.00000010e00, -1.00000000e-07]),
14: np.array([0.70708302, -0.70713055, 0.0]),
15: np.array([0.39889911, -0.91699482, 0.0]),
16: np.array([0.9170161, -0.39885019, 0.0]),
17: np.array([1.0000001e00, -5.0084097e-01, -1.0000000e-07]),
18: np.array([5.00840536e-01, -1.00000010e00, -1.00000000e-07]),
19: np.array([-1.00000010e00, 5.00840994e-01, -1.00000000e-07]),
20: np.array([-5.00840522e-01, 1.00000010e00, -1.00000000e-07]),
21: np.array([-0.70708007, 0.7071335, 0.0]),
22: np.array([-0.3988993, 0.91699474, 0.0]),
23: np.array([-0.91701595, 0.39885053, 0.0]),
24: np.array([0.70708387, 0.70712969, 0.0]),
25: np.array([0.91703964, 0.39879607, 0.0]),
26: np.array([0.39884522, 0.91701826, 0.0]),
27: np.array([5.00840838e-01, 1.00000010e00, -1.00000000e-07]),
28: np.array([1.0000001e00, 5.0084126e-01, -1.0000000e-07]),
29: np.array([-0.26874902, 0.11131954, 0.0]),
30: np.array([0.36325451, -0.15046501, 0.0]),
31: np.array([0.19021621, 0.4476254, 0.0]),
32: np.array([-0.182016, -0.45102208, 0.0]),
33: np.array([-0.59112954, -0.22558921, 0.0]),
34: np.array([-0.22540267, -0.16987674, 0.0]),
35: np.array([-0.63443287, 0.05569767, 0.0]),
36: np.array([-0.44461198, -0.57917949, 0.0]),
37: np.array([0.44871232, 0.57747943, 0.0]),
38: np.array([0.59523122, 0.22389143, 0.0]),
39: np.array([-0.03926279, 0.27950473, 0.0]),
40: np.array([-0.25847266, 0.57750255, 0.0]),
41: np.array([-0.48799137, 0.4092244, 0.0]),
42: np.array([0.09515393, 0.72394368, 0.0]),
43: np.array([0.26257348, -0.57920237, 0.0]),
44: np.array([-0.09105243, -0.72564101, 0.0]),
45: np.array([0.27675898, 0.14860423, 0.0]),
46: np.array([0.68170888, -0.07528174, 0.0]),
47: np.array([0.0906191, -0.30077723, 0.0]),
48: np.array([0.53526786, -0.42880511, 0.0]),
49: np.array([0.04725594, -0.01957406, 0.0]),
50: np.array([-8.47201857e-01, -8.47201870e-01, -1.00000000e-07]),
51: np.array([8.47201868e-01, -8.47201877e-01, -1.00000000e-07]),
52: np.array([-8.47201866e-01, 8.47201879e-01, -1.00000000e-07]),
53: np.array([8.47201860e-01, 8.47201868e-01, -1.00000000e-07]),
}
quadratic_triangle_cells = {
"triangle6": {
1: np.array([1, 32, 29, 33, 34, 35]),
2: np.array([10, 32, 1, 36, 33, 11]),
3: np.array([24, 31, 4, 37, 38, 25]),
4: np.array([29, 31, 21, 39, 40, 41]),
5: np.array([21, 31, 7, 40, 42, 22]),
6: np.array([14, 32, 3, 43, 44, 15]),
7: np.array([4, 31, 30, 38, 45, 46]),
8: np.array([30, 32, 14, 47, 43, 48]),
9: np.array([1, 29, 21, 35, 41, 23]),
10: np.array([4, 30, 14, 46, 48, 16]),
11: np.array([7, 31, 24, 42, 37, 26]),
12: np.array([3, 32, 10, 44, 36, 12]),
13: np.array([30, 31, 29, 45, 39, 49]),
14: np.array([29, 32, 30, 34, 47, 49]),
15: np.array([2, 10, 1, 50, 11, 9]),
16: np.array([2, 3, 10, 13, 12, 50]),
17: np.array([14, 5, 4, 51, 17, 16]),
18: np.array([3, 5, 14, 18, 51, 15]),
19: np.array([1, 21, 6, 23, 52, 19]),
20: np.array([6, 21, 7, 52, 22, 20]),
21: np.array([24, 4, 8, 25, 28, 53]),
22: np.array([7, 24, 8, 26, 53, 27]),
},
}
quadratic_triangle_cell_sets = {
"DISK": np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
"MATERIAL_VOID": np.array([15, 16, 17, 18, 19, 20, 21, 22]),
}
# Linear quadrilateral
linear_quadrilateral_vertices = {
1: np.array([-1.00000005e00, 1.06043306e-15, -5.00000000e-08]),
2: np.array([-1.0000001e00, -1.0000001e00, -1.0000000e-07]),
3: np.array([-1.05896103e-13, -1.00000005e00, -5.00000000e-08]),
4: np.array([1.0, 0.0, 0.0]),
5: np.array([1.0000001e00, -1.0000001e00, -1.0000000e-07]),
6: np.array([-1.0000001e00, 1.0000001e00, -1.0000000e-07]),
7: np.array([-1.08549196e-13, 1.00000005e00, -5.00000000e-08]),
8: np.array([1.0000001e00, 1.0000001e00, -1.0000000e-07]),
9: np.array([-1.0000001e00, -5.0000005e-01, -1.0000000e-07]),
10: np.array([-0.70710678, -0.70710678, 0.0]),
11: np.array([-0.38268343, -0.92387953, 0.0]),
12: np.array([-0.92387953, -0.38268343, 0.0]),
13: np.array([-5.0000005e-01, -1.0000001e00, -1.0000000e-07]),
14: np.array([0.70710678, -0.70710678, 0.0]),
15: np.array([0.38268343, -0.92387953, 0.0]),
16: np.array([0.92387953, -0.38268343, 0.0]),
17: np.array([1.0000001e00, -5.0000005e-01, -1.0000000e-07]),
18: np.array([5.0000005e-01, -1.0000001e00, -1.0000000e-07]),
19: np.array([-1.0000001e00, 5.0000005e-01, -1.0000000e-07]),
20: np.array([-5.0000005e-01, 1.0000001e00, -1.0000000e-07]),
21: np.array([-0.70710678, 0.70710678, 0.0]),
22: np.array([-0.38268343, 0.92387953, 0.0]),
23: np.array([-0.92387953, 0.38268343, 0.0]),
24: np.array([0.70710678, 0.70710678, 0.0]),
25: np.array([0.38268343, 0.92387953, 0.0]),
26: np.array([0.92387953, 0.38268343, 0.0]),
27: np.array([5.0000005e-01, 1.0000001e00, -1.0000000e-07]),
28: np.array([1.0000001e00, 5.0000005e-01, -1.0000000e-07]),
29: np.array([-0.3792915, 0.15650978, 0.0]),
30: np.array([0.38168151, -0.15842764, 0.0]),
31: np.array([0.15872338, 0.37986656, 0.0]),
32: np.array([-0.15649223, -0.38174939, 0.0]),
33: np.array([-0.50784728, 0.42138252, 0.0]),
34: np.array([-0.65722191, 0.06066527, 0.0]),
35: np.array([-0.69553448, 0.28790546, 0.0]),
36: np.array([0.50919023, -0.42239878, 0.0]),
37: np.array([0.65850471, -0.061772, 0.0]),
38: np.array([0.69644173, -0.28863769, 0.0]),
39: np.array([0.42252446, 0.50821553, 0.0]),
40: np.array([0.06197349, 0.65756204, 0.0]),
41: np.array([0.2887463, 0.69579777, 0.0]),
42: np.array([-0.42134337, -0.50931208, 0.0]),
43: np.array([-0.06076066, -0.6585379, 0.0]),
44: np.array([-0.28794272, -0.69650722, 0.0]),
45: np.array([0.27370682, 0.11206271, 0.0]),
46: np.array([0.56633144, 0.23392666, 0.0]),
47: np.array([0.11443226, -0.27341729, 0.0]),
48: np.array([0.23500879, -0.5662575, 0.0]),
49: np.array([-0.27110596, -0.11440358, 0.0]),
50: np.array([-0.56503275, -0.23508101, 0.0]),
51: np.array([-0.11166725, 0.27156671, 0.0]),
52: np.array([-0.23365889, 0.56524013, 0.0]),
53: np.array([0.00136546, -0.00109045, 0.0]),
54: np.array([-7.90828267e-01, -7.90828264e-01, -1.00000000e-07]),
55: np.array([-5.69668535e-01, -9.09867896e-01, -1.00000000e-07]),
56: np.array([-9.09867913e-01, -5.69668535e-01, -1.00000000e-07]),
57: np.array([7.90828264e-01, -7.90828264e-01, -1.00000000e-07]),
58: np.array([5.69668535e-01, -9.09867896e-01, -1.00000000e-07]),
59: np.array([9.09867896e-01, -5.69668535e-01, -1.00000000e-07]),
60: np.array([-7.90828292e-01, 7.90828292e-01, -1.00000000e-07]),
61: np.array([-5.69668535e-01, 9.09867913e-01, -1.00000000e-07]),
62: np.array([-9.09867913e-01, 5.69668535e-01, -1.00000000e-07]),
63: np.array([7.90828264e-01, 7.90828264e-01, -1.00000000e-07]),
64: np.array([5.69668535e-01, 9.09867896e-01, -1.00000000e-07]),
65: np.array([9.09867896e-01, 5.69668535e-01, -1.00000000e-07]),
}
linear_quadrilateral_cells = {
"quad": {
1: np.array([1, 34, 35, 23]),
2: np.array([21, 23, 35, 33]),
3: np.array([29, 33, 35, 34]),
4: np.array([4, 37, 38, 16]),
5: np.array([14, 16, 38, 36]),
6: np.array([30, 36, 38, 37]),
7: np.array([7, 40, 41, 25]),
8: np.array([24, 25, 41, 39]),
9: np.array([31, 39, 41, 40]),
10: np.array([3, 43, 44, 11]),
11: np.array([10, 11, 44, 42]),
12: np.array([32, 42, 44, 43]),
13: np.array([4, 26, 46, 37]),
14: np.array([30, 37, 46, 45]),
15: np.array([31, 45, 46, 39]),
16: np.array([24, 39, 46, 26]),
17: np.array([32, 43, 48, 47]),
18: np.array([30, 47, 48, 36]),
19: np.array([14, 36, 48, 15]),
20: np.array([3, 15, 48, 43]),
21: np.array([32, 49, 50, 42]),
22: np.array([10, 42, 50, 12]),
23: np.array([1, 12, 50, 34]),
24: np.array([29, 34, 50, 49]),
25: np.array([21, 33, 52, 22]),
26: np.array([7, 22, 52, 40]),
27: np.array([31, 40, 52, 51]),
28: np.array([29, 51, 52, 33]),
29: np.array([30, 45, 53, 47]),
30: np.array([32, 47, 53, 49]),
31: np.array([29, 49, 53, 51]),
32: np.array([31, 51, 53, 45]),
33: np.array([10, 54, 55, 11]),
34: np.array([3, 11, 55, 13]),
35: np.array([2, 13, 55, 54]),
36: np.array([2, 54, 56, 9]),
37: np.array([1, 9, 56, 12]),
38: np.array([10, 12, 56, 54]),
39: np.array([5, 57, 58, 18]),
40: np.array([3, 18, 58, 15]),
41: np.array([14, 15, 58, 57]),
42: np.array([14, 57, 59, 16]),
43: np.array([4, 16, 59, 17]),
44: np.array([5, 17, 59, 57]),
45: np.array([6, 60, 61, 20]),
46: np.array([7, 20, 61, 22]),
47: np.array([21, 22, 61, 60]),
48: np.array([21, 60, 62, 23]),
49: np.array([1, 23, 62, 19]),
50: np.array([6, 19, 62, 60]),
51: np.array([24, 63, 64, 25]),
52: np.array([7, 25, 64, 27]),
53: np.array([8, 27, 64, 63]),
54: np.array([8, 63, 65, 28]),
55: np.array([4, 28, 65, 26]),
56: np.array([24, 26, 65, 63]),
},
}
linear_quadrilateral_cell_sets = {
"DISK": np.array(
[
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
]
),
"MATERIAL_VOID": np.array(
[
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
56,
]
),
}
# Quadratic quadrilateral
quadratic_quadrilateral_vertices = {
1: np.array([-1.00000005e00, 1.06043306e-15, -5.00000000e-08]),
2: np.array([-1.0000001e00, -1.0000001e00, -1.0000000e-07]),
3: np.array([-1.05896103e-13, -1.00000005e00, -5.00000000e-08]),
4: np.array([1.0, 0.0, 0.0]),
5: np.array([1.0000001e00, -1.0000001e00, -1.0000000e-07]),
6: np.array([-1.0000001e00, 1.0000001e00, -1.0000000e-07]),
7: np.array([-1.08549196e-13, 1.00000005e00, -5.00000000e-08]),
8: np.array([1.0000001e00, 1.0000001e00, -1.0000000e-07]),
9: np.array([-1.00000010e00, -4.98511454e-01, -1.00000000e-07]),
10: np.array([-1.00000010e00, -2.52365176e-01, -1.00000000e-07]),
11: np.array([-1.00000010e00, -7.51512278e-01, -1.00000000e-07]),
12: np.array([-0.70710673, -0.70710683, 0.0]),
13: np.array([-0.91479328, -0.40392235, 0.0]),
14: np.array([-0.4039225, -0.91479321, 0.0]),
15: np.array([-4.98511453e-01, -1.00000010e00, -1.00000000e-07]),
16: np.array([-7.51512278e-01, -1.00000010e00, -1.00000000e-07]),
17: np.array([-2.52365179e-01, -1.00000010e00, -1.00000000e-07]),
18: np.array([0.70710602, -0.70710754, 0.0]),
19: np.array([0.40392263, -0.91479315, 0.0]),
20: np.array([0.91479409, -0.40392051, 0.0]),
21: np.array([1.00000010e00, -4.98511455e-01, -1.00000000e-07]),
22: np.array([1.00000010e00, -7.51512278e-01, -1.00000000e-07]),
23: np.array([1.00000010e00, -2.52365135e-01, -1.00000000e-07]),
24: np.array([4.98511459e-01, -1.00000010e00, -1.00000000e-07]),
25: np.array([2.52365169e-01, -1.00000010e00, -1.00000000e-07]),
26: np.array([7.51512278e-01, -1.00000010e00, -1.00000000e-07]),
27: np.array([-1.00000010e00, 4.98511454e-01, -1.00000000e-07]),
28: np.array([-1.00000010e00, 7.51512278e-01, -1.00000000e-07]),
29: np.array([-1.00000010e00, 2.52365177e-01, -1.00000000e-07]),
30: np.array([-4.98511454e-01, 1.00000010e00, -1.00000000e-07]),
31: np.array([-2.52365177e-01, 1.00000010e00, -1.00000000e-07]),
32: np.array([-7.51512278e-01, 1.00000010e00, -1.00000000e-07]),
33: np.array([-0.70710673, 0.70710683, 0.0]),
34: np.array([-0.4039225, 0.91479321, 0.0]),
35: np.array([-0.91479328, 0.40392235, 0.0]),
36: np.array([0.70710602, 0.70710754, 0.0]),
37: np.array([0.91479409, 0.40392051, 0.0]),
38: np.array([0.40392263, 0.91479315, 0.0]),
39: np.array([4.98511459e-01, 1.00000010e00, -1.00000000e-07]),
40: np.array([7.51512278e-01, 1.00000010e00, -1.00000000e-07]),
41: np.array([2.52365169e-01, 1.00000010e00, -1.00000000e-07]),
42: np.array([1.00000010e00, 4.98511455e-01, -1.00000000e-07]),
43: np.array([1.00000010e00, 2.52365135e-01, -1.00000000e-07]),
44: np.array([1.00000010e00, 7.51512278e-01, -1.00000000e-07]),
45: np.array([9.40648347e-09, 1.55599465e-11, 0.00000000e00]),
46: np.array([0.34876797, -0.34876795, 0.0]),
47: np.array([-0.34876795, -0.34876795, 0.0]),
48: np.array([-0.34876795, 0.34876795, 0.0]),
49: np.array([0.34876797, 0.34876795, 0.0]),
50: np.array([-7.35485354e-01, -7.35485333e-01, -1.00000000e-07]),
51: np.array([-8.76962152e-01, -6.28131775e-01, -1.00000000e-07]),
52: np.array([-7.19452931e-01, -7.19452916e-01, -1.00000000e-07]),
53: np.array([-6.28131780e-01, -8.76962143e-01, -1.00000000e-07]),
54: np.array([7.35485298e-01, -7.35485357e-01, -1.00000000e-07]),
55: np.array([6.28131775e-01, -8.76962123e-01, -1.00000000e-07]),
56: np.array([7.19453006e-01, -7.19452939e-01, -1.00000000e-07]),
57: np.array([8.76962194e-01, -6.28131808e-01, -1.00000000e-07]),
58: np.array([-7.35485351e-01, 7.35485352e-01, -1.00000000e-07]),
59: np.array([-7.19452905e-01, 7.19452905e-01, -1.00000000e-07]),
60: np.array([-8.76962150e-01, 6.28131775e-01, -1.00000000e-07]),
61: np.array([-6.28131775e-01, 8.76962150e-01, -1.00000000e-07]),
62: np.array([7.35485298e-01, 7.35485357e-01, -1.00000000e-07]),
63: np.array([8.76962194e-01, 6.28131808e-01, -1.00000000e-07]),
64: np.array([7.19453006e-01, 7.19452939e-01, -1.00000000e-07]),
65: np.array([6.28131775e-01, 8.76962123e-01, -1.00000000e-07]),
}
quadratic_quadrilateral_cells = {
"quad8": {
1: np.array([3, 18, 45, 12, 19, 46, 47, 14]),
2: np.array([1, 12, 45, 33, 13, 47, 48, 35]),
3: np.array([7, 33, 45, 36, 34, 48, 49, 38]),
4: np.array([4, 36, 45, 18, 37, 49, 46, 20]),
5: np.array([1, 9, 50, 12, 10, 51, 52, 13]),
6: np.array([2, 15, 50, 9, 16, 53, 51, 11]),
7: np.array([3, 12, 50, 15, 14, 52, 53, 17]),
8: np.array([3, 24, 54, 18, 25, 55, 56, 19]),
9: np.array([4, 18, 54, 21, 20, 56, 57, 23]),
10: np.array([5, 21, 54, 24, 22, 57, 55, 26]),
11: np.array([1, 33, 58, 27, 35, 59, 60, 29]),
12: np.array([6, 27, 58, 30, 28, 60, 61, 32]),
13: np.array([7, 30, 58, 33, 31, 61, 59, 34]),
14: np.array([4, 42, 62, 36, 43, 63, 64, 37]),
15: np.array([7, 36, 62, 39, 38, 64, 65, 41]),
16: np.array([8, 39, 62, 42, 40, 65, 63, 44]),
},
}
quadratic_quadrilateral_cell_sets = {
"DISK": np.array([1, 2, 3, 4]),
"MATERIAL_VOID": np.array([5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
}
two_disks_tri6_quad8_vertices = {
1: np.array([1.0, 0.0, 0.0]),
2: np.array([3.0, 0.0, 0.0]),
3: np.array([0.62348980185873, 0.78183148246803, 0]),
4: np.array([-0.22252093395631, 0.97492791218182, 0]),
5: np.array([-0.90096886790242, 0.43388373911756, 0]),
6: np.array([-0.90096886790242, -0.43388373911756, 0]),
7: np.array([-0.22252093395631, -0.97492791218182, 0]),
8: np.array([0.62348980185873, -0.78183148246803, 0]),
9: np.array([0.90096886790242, 0.43388373911756, 0]),
10: np.array([0.22252093395631, 0.97492791218182, 0]),
11: np.array([-0.62348980185873, 0.78183148246803, 0]),
12: np.array([-1, 1.2246467991474e-16, 0]),
13: np.array([-0.62348980185873, -0.78183148246803, 0]),
14: np.array([0.22252093395631, -0.97492791218182, 0]),
15: np.array([0.90096886790242, -0.43388373911756, 0]),
16: np.array([2.70710996354, 0.70710359881873, 0]),
17: np.array([1.9918462491018, 0.99996675762062, 0]),
18: np.array([1.2928926767367, 0.70710623910939, 0]),
19: np.array([1.0000332017522, 0.0081487669001626, 0]),
20: np.array([1.2928879761966, -0.7071015385308, 0]),
21: np.array([2.0076807905656, -0.99997050229309, 0]),
22: np.array([2.7070847766723, -0.70712878501603, 0]),
23: np.array([2.9237489226553, 0.38299860038019, 0]),
24: np.array([2.38299510833, 0.92375037049805, 0]),
25: np.array([1.6338745929072, 0.93056551960682, 0]),
26: np.array([1.0694364290352, 0.36613035983548, 0]),
27: np.array([1.0762502016859, -0.38299648838425, 0]),
28: np.array([1.6170182046085, -0.92375589004818, 0]),
29: np.array([2.3670690911736, -0.93019367999627, 0]),
30: np.array([2.9302142257684, -0.36701702164618, 0]),
31: np.array([1.5202888403297e-17, -7.7860210853066e-18, 0]),
32: np.array([-0.45048443395121, 0.21694186955878, 0]),
33: np.array([-0.11126046697816, 0.48746395609091, 0]),
34: np.array([-0.45048443395121, -0.21694186955878, 0]),
35: np.array([0.31174490092937, 0.39091574123401, 0]),
36: np.array([-0.11126046697816, -0.48746395609091, 0]),
37: np.array([0.5, -3.8930105426533e-18, 0]),
38: np.array([0.31174490092937, -0.39091574123401, 0]),
39: np.array([1.9939169035527, 0.00079973439438155, 0]),
40: np.array([2.3354083217744, -0.33306514902439, 0]),
41: np.array([1.652296679386, 0.34201120858086, 0]),
42: np.array([1.6405716677311, -0.35579835707924, 0]),
43: np.array([1.830900682302, 0.16345807128212, 0]),
44: np.array([1.3331173056494, 0.16859944295424, 0]),
45: np.array([2.3532471196873, 0.35675071610953, 0]),
46: np.array([2.1570160552678, -0.1586081427035, 0]),
47: np.array([2.660694970063, -0.16384124850615, 0]),
48: np.array([2.1649675154598, -0.65963968373472, 0]),
49: np.array([1.8282722837808, 0.66421423025096, 0]),
}
# Reference connectivity for the two-disk tri6/quad8 mesh: cell type ->
# {cell ID -> vertex IDs}.  Vertex IDs index into the matching vertices dict.
# Each triangle6 row appears to be three corner nodes followed by three
# edge-midside nodes, and each quad8 row four corners followed by four
# midsides — NOTE(review): node ordering convention assumed, confirm against
# the generating mesh tool.
two_disks_tri6_quad8_cells = {
    "triangle6": {
        1: np.array([5, 31, 4, 32, 33, 11]),
        2: np.array([6, 31, 5, 34, 32, 12]),
        3: np.array([4, 31, 3, 33, 35, 10]),
        4: np.array([7, 31, 6, 36, 34, 13]),
        5: np.array([3, 31, 1, 35, 37, 9]),
        6: np.array([8, 31, 7, 38, 36, 14]),
        7: np.array([1, 31, 8, 37, 38, 15]),
    },
    "quad8": {
        8: np.array([19, 20, 39, 41, 27, 42, 43, 44]),
        9: np.array([2, 16, 39, 40, 23, 45, 46, 47]),
        10: np.array([40, 39, 20, 21, 46, 42, 28, 48]),
        11: np.array([41, 39, 16, 17, 43, 45, 24, 49]),
        12: np.array([17, 18, 19, 41, 25, 26, 44, 49]),
        13: np.array([21, 22, 2, 40, 29, 30, 47, 48]),
    },
}
# Reference vertices for the "pin 1" mesh (a 2x2 cell containing one circular
# fuel pin centered at (1, 1)): vertex ID -> [x, y, z] with z == 0 throughout.
# Vertex IDs are non-contiguous (gaps at 6-8, 50-83, 92-99) because the IDs
# are shared with the combined pin_1and2 mesh below.
pin_1_vertices = {
    1: np.array([0.0, 0.0, 0.0]),
    2: np.array([2.0, 0.0, 0.0]),
    3: np.array([0.0, 2.0, 0.0]),
    4: np.array([2.0, 2.0, 0.0]),
    5: np.array([1.5, 1.0, 0.0]),
    9: np.array([0.5, 0.0, 0.0]),
    10: np.array([1.0, 0.0, 0.0]),
    11: np.array([1.5, 0.0, 0.0]),
    12: np.array([0.25, 0.0, 0.0]),
    13: np.array([0.75, 0.0, 0.0]),
    14: np.array([1.25, 0.0, 0.0]),
    15: np.array([1.75, 0.0, 0.0]),
    16: np.array([0.0, 1.5, 0.0]),
    17: np.array([0.0, 1.0, 0.0]),
    18: np.array([0.0, 0.5, 0.0]),
    19: np.array([0.0, 1.75, 0.0]),
    20: np.array([0.0, 1.25, 0.0]),
    21: np.array([0.0, 0.75, 0.0]),
    22: np.array([0.0, 0.25, 0.0]),
    23: np.array([2.0, 0.5, 0.0]),
    24: np.array([2.0, 1.0, 0.0]),
    25: np.array([2.0, 1.5, 0.0]),
    26: np.array([2.0, 0.25, 0.0]),
    27: np.array([2.0, 0.75, 0.0]),
    28: np.array([2.0, 1.25, 0.0]),
    29: np.array([2.0, 1.75, 0.0]),
    30: np.array([1.5, 2.0, 0.0]),
    31: np.array([1.0, 2.0, 0.0]),
    32: np.array([0.5, 2.0, 0.0]),
    33: np.array([1.75, 2.0, 0.0]),
    34: np.array([1.25, 2.0, 0.0]),
    35: np.array([0.75, 2.0, 0.0]),
    36: np.array([0.25, 2.0, 0.0]),
    37: np.array([1.3117449009294, 1.390915741234, 0.0]),
    38: np.array([0.88873953302184, 1.4874639560909, 0.0]),
    39: np.array([0.54951556604879, 1.2169418695588, 0.0]),
    40: np.array([0.54951556604879, 0.78305813044122, 0.0]),
    41: np.array([0.88873953302184, 0.51253604390909, 0.0]),
    42: np.array([1.3117449009294, 0.60908425876599, 0.0]),
    43: np.array([1.4504844339512, 1.2169418695588, 0.0]),
    44: np.array([1.1112604669782, 1.4874639560909, 0.0]),
    45: np.array([0.68825509907063, 1.390915741234, 0.0]),
    46: np.array([0.5, 1.0, 0.0]),
    47: np.array([0.68825509907063, 0.60908425876599, 0.0]),
    48: np.array([1.1112604669782, 0.51253604390909, 0.0]),
    49: np.array([1.4504844339512, 0.78305813044122, 0.0]),
    84: np.array([1.0, 1.0, 0.0]),
    85: np.array([1.1558724504647, 1.195457870617, 0.0]),
    86: np.array([1.25, 1.0, 0.0]),
    87: np.array([0.94436976651092, 1.2437319780455, 0.0]),
    88: np.array([1.1558724504647, 0.80454212938299, 0.0]),
    89: np.array([0.7747577830244, 1.1084709347794, 0.0]),
    90: np.array([0.7747577830244, 0.89152906522061, 0.0]),
    91: np.array([0.94436976651092, 0.75626802195454, 0.0]),
    100: np.array([0.39945256244833, 1.6005474375517, 0.0]),
    101: np.array([0.39945256244833, 0.39945256244833, 0.0]),
    102: np.array([1.6749575669735, 0.67495756697349, 0.0]),
    103: np.array([1.6749575669735, 1.3250424330265, 0.0]),
    104: np.array([1.6076357762794, 1.6521790377186, 0.0]),
    105: np.array([1.6076357762794, 0.34782096228136, 0.0]),
    106: np.array([1.2323349413967, 1.7035415484495, 0.0]),
    107: np.array([1.2323349413967, 0.29645845155054, 0.0]),
    108: np.array([0.2747577830244, 0.89152906522061, 0.0]),
    109: np.array([0.2747577830244, 1.1084709347794, 0.0]),
    110: np.array([0.2747577830244, 0.64152906522061, 0.0]),
    111: np.array([0.2747577830244, 1.3584709347794, 0.0]),
    112: np.array([0.94436976651092, 0.25626802195454, 0.0]),
    113: np.array([0.69436976651092, 0.25626802195454, 0.0]),
    114: np.array([0.69436976651092, 1.7437319780455, 0.0]),
    115: np.array([0.94436976651092, 1.7437319780455, 0.0]),
    116: np.array([0.44972628122416, 1.8002737187758, 0.0]),
    117: np.array([0.64409604773509, 1.5440056968213, 0.0]),
    118: np.array([0.64409604773509, 0.45599430317871, 0.0]),
    119: np.array([0.44972628122416, 0.19972628122416, 0.0]),
    120: np.array([0.47448406424856, 1.4087446535552, 0.0]),
    121: np.array([0.19972628122416, 1.5502737187758, 0.0]),
    122: np.array([0.19972628122416, 0.44972628122416, 0.0]),
    123: np.array([0.47448406424856, 0.59125534644477, 0.0]),
    124: np.array([1.8038178881397, 0.42391048114068, 0.0]),
    125: np.array([1.8038178881397, 0.17391048114068, 0.0]),
    126: np.array([1.8038178881397, 1.8260895188593, 0.0]),
    127: np.array([1.8038178881397, 1.5760895188593, 0.0]),
    128: np.array([0.19972628122416, 0.19972628122416, 0.0]),
    129: np.array([0.19972628122416, 1.8002737187758, 0.0]),
    130: np.array([1.5538178881397, 0.17391048114068, 0.0]),
    131: np.array([1.5538178881397, 1.8260895188593, 0.0]),
    132: np.array([1.1161674706984, 0.14822922577527, 0.0]),
    133: np.array([1.0605372372093, 0.40449724772982, 0.0]),
    134: np.array([1.0605372372093, 1.5955027522702, 0.0]),
    135: np.array([1.1161674706984, 1.8517707742247, 0.0]),
    136: np.array([1.5874787834867, 0.83747878348674, 0.0]),
    137: np.array([1.8374787834867, 0.83747878348674, 0.0]),
    138: np.array([1.75, 1.0, 0.0]),
    139: np.array([1.8374787834867, 1.1625212165133, 0.0]),
    140: np.array([1.5874787834867, 1.1625212165133, 0.0]),
    141: np.array([1.8374787834867, 1.4125212165133, 0.0]),
    142: np.array([1.8374787834867, 0.58747878348674, 0.0]),
    143: np.array([1.3661674706984, 0.14822922577527, 0.0]),
    144: np.array([1.3661674706984, 1.8517707742247, 0.0]),
    145: np.array([1.4933512339514, 0.64202091286974, 0.0]),
    146: np.array([1.4933512339514, 1.3579790871303, 0.0]),
    147: np.array([1.6412966716264, 0.51138926462742, 0.0]),
    148: np.array([1.6412966716264, 1.4886107353726, 0.0]),
    149: np.array([1.2720399211631, 0.45277135515826, 0.0]),
    150: np.array([1.2720399211631, 1.5472286448417, 0.0]),
    151: np.array([1.4199853588381, 1.677860293084, 0.0]),
    152: np.array([1.4199853588381, 0.32213970691595, 0.0]),
    153: np.array([1.4596903386044, 1.5215473894763, 0.0]),
    154: np.array([1.4596903386044, 0.47845261052367, 0.0]),
}
# Reference connectivity for the "pin 1" mesh: cell type -> {cell ID ->
# vertex IDs into pin_1_vertices}.  Cells 1-7 discretize the pin interior
# (see pin_1_cell_sets "MATERIAL_UO2"); cells 15-53 the surrounding region
# ("MATERIAL_WATER").  Cell IDs 8-14 are absent here — they belong to the
# second pin in the combined pin_1and2 mesh.
pin_1_cells = {
    "triangle6": {
        1: np.array([37, 84, 5, 85, 86, 43]),
        2: np.array([38, 84, 37, 87, 85, 44]),
        3: np.array([5, 84, 42, 86, 88, 49]),
        4: np.array([39, 84, 38, 89, 87, 45]),
        5: np.array([40, 84, 39, 90, 89, 46]),
        6: np.array([42, 84, 41, 88, 91, 48]),
        7: np.array([41, 84, 40, 91, 90, 47]),
        15: np.array([17, 40, 39, 108, 46, 109]),
        16: np.array([18, 40, 17, 110, 108, 21]),
        17: np.array([16, 17, 39, 20, 109, 111]),
        18: np.array([9, 10, 41, 13, 112, 113]),
        19: np.array([32, 38, 31, 114, 115, 35]),
        20: np.array([32, 100, 38, 116, 117, 114]),
        21: np.array([41, 101, 9, 118, 119, 113]),
        22: np.array([39, 100, 16, 120, 121, 111]),
        23: np.array([18, 101, 40, 122, 123, 110]),
        24: np.array([23, 105, 2, 124, 125, 26]),
        25: np.array([4, 104, 25, 126, 127, 29]),
        26: np.array([9, 101, 1, 119, 128, 12]),
        27: np.array([1, 101, 18, 128, 122, 22]),
        28: np.array([16, 100, 3, 121, 129, 19]),
        29: np.array([3, 100, 32, 129, 116, 36]),
        30: np.array([38, 100, 39, 117, 120, 45]),
        31: np.array([40, 101, 41, 123, 118, 47]),
        32: np.array([2, 105, 11, 125, 130, 15]),
        33: np.array([30, 104, 4, 131, 126, 33]),
        34: np.array([10, 107, 41, 132, 133, 112]),
        35: np.array([38, 106, 31, 134, 135, 115]),
        36: np.array([5, 102, 24, 136, 137, 138]),
        37: np.array([24, 103, 5, 139, 140, 138]),
        38: np.array([25, 103, 24, 141, 139, 28]),
        39: np.array([24, 102, 23, 137, 142, 27]),
        40: np.array([11, 107, 10, 143, 132, 14]),
        41: np.array([31, 106, 30, 135, 144, 34]),
        42: np.array([42, 102, 5, 145, 136, 49]),
        43: np.array([5, 103, 37, 140, 146, 43]),
        44: np.array([102, 105, 23, 147, 124, 142]),
        45: np.array([25, 104, 103, 127, 148, 141]),
        46: np.array([41, 107, 42, 133, 149, 48]),
        47: np.array([37, 106, 38, 150, 134, 44]),
        48: np.array([30, 106, 104, 144, 151, 131]),
        49: np.array([105, 107, 11, 152, 143, 130]),
        50: np.array([104, 106, 37, 151, 150, 153]),
        51: np.array([42, 107, 105, 149, 152, 154]),
        52: np.array([42, 105, 102, 154, 147, 145]),
        53: np.array([103, 104, 37, 148, 153, 146]),
    }
}
# Cell sets for the "pin 1" mesh: material name -> IDs of the cells in
# pin_1_cells that carry that material.  Both sets are contiguous ID runs,
# so they are expressed as ranges: 1-7 fuel, 15-53 moderator.
pin_1_cell_sets = {
    "MATERIAL_UO2": np.arange(1, 8),
    "MATERIAL_WATER": np.arange(15, 54),
}
# Reference vertices for the "pin 2" mesh (the 2x2 cell spanning x in [2, 4]
# containing the second pin, centered at (3, 1)): vertex ID -> [x, y, z],
# z == 0 throughout.  IDs shared with pin 1 along the boundary x == 2
# (IDs 2, 4, 23-29) repeat the same coordinates as in pin_1_vertices.
pin_2_vertices = {
    2: np.array([2.0, 0.0, 0.0]),
    4: np.array([2.0, 2.0, 0.0]),
    6: np.array([4.0, 0.0, 0.0]),
    7: np.array([4.0, 2.0, 0.0]),
    8: np.array([3.5, 1.0, 0.0]),
    23: np.array([2.0, 0.5, 0.0]),
    24: np.array([2.0, 1.0, 0.0]),
    25: np.array([2.0, 1.5, 0.0]),
    26: np.array([2.0, 0.25, 0.0]),
    27: np.array([2.0, 0.75, 0.0]),
    28: np.array([2.0, 1.25, 0.0]),
    29: np.array([2.0, 1.75, 0.0]),
    50: np.array([2.5, 0.0, 0.0]),
    51: np.array([3.0, 0.0, 0.0]),
    52: np.array([3.5, 0.0, 0.0]),
    53: np.array([2.25, 0.0, 0.0]),
    54: np.array([2.75, 0.0, 0.0]),
    55: np.array([3.25, 0.0, 0.0]),
    56: np.array([3.75, 0.0, 0.0]),
    57: np.array([4.0, 0.5, 0.0]),
    58: np.array([4.0, 1.0, 0.0]),
    59: np.array([4.0, 1.5, 0.0]),
    60: np.array([4.0, 0.25, 0.0]),
    61: np.array([4.0, 0.75, 0.0]),
    62: np.array([4.0, 1.25, 0.0]),
    63: np.array([4.0, 1.75, 0.0]),
    64: np.array([3.5, 2.0, 0.0]),
    65: np.array([3.0, 2.0, 0.0]),
    66: np.array([2.5, 2.0, 0.0]),
    67: np.array([3.75, 2.0, 0.0]),
    68: np.array([3.25, 2.0, 0.0]),
    69: np.array([2.75, 2.0, 0.0]),
    70: np.array([2.25, 2.0, 0.0]),
    71: np.array([3.3117449009294, 1.390915741234, 0.0]),
    72: np.array([2.8887395330218, 1.4874639560909, 0.0]),
    73: np.array([2.5495155660488, 1.2169418695588, 0.0]),
    74: np.array([2.5495155660488, 0.78305813044122, 0.0]),
    75: np.array([2.8887395330218, 0.51253604390909, 0.0]),
    76: np.array([3.3117449009294, 0.60908425876599, 0.0]),
    77: np.array([3.4504844339512, 1.2169418695588, 0.0]),
    78: np.array([3.1112604669782, 1.4874639560909, 0.0]),
    79: np.array([2.6882550990706, 1.390915741234, 0.0]),
    80: np.array([2.5, 1.0, 0.0]),
    81: np.array([2.6882550990706, 0.60908425876599, 0.0]),
    82: np.array([3.1112604669782, 0.51253604390909, 0.0]),
    83: np.array([3.4504844339512, 0.78305813044122, 0.0]),
    92: np.array([3.0, 1.0, 0.0]),
    93: np.array([2.7747577830244, 1.1084709347794, 0.0]),
    94: np.array([2.9443697665109, 1.2437319780455, 0.0]),
    95: np.array([2.7747577830244, 0.89152906522061, 0.0]),
    96: np.array([2.9443697665109, 0.75626802195454, 0.0]),
    97: np.array([3.1558724504647, 1.195457870617, 0.0]),
    98: np.array([3.1558724504647, 0.80454212938299, 0.0]),
    99: np.array([3.25, 1.0, 0.0]),
    155: np.array([2.3994525624483, 1.6005474375517, 0.0]),
    156: np.array([2.3994525624483, 0.39945256244833, 0.0]),
    157: np.array([3.6749575669735, 1.3250424330265, 0.0]),
    158: np.array([3.6749575669735, 0.67495756697349, 0.0]),
    159: np.array([3.6076357762794, 1.6521790377186, 0.0]),
    160: np.array([3.6076357762794, 0.34782096228136, 0.0]),
    161: np.array([3.2323349413967, 0.29645845155054, 0.0]),
    162: np.array([3.2323349413967, 1.7035415484495, 0.0]),
    163: np.array([2.2747577830244, 0.89152906522061, 0.0]),
    164: np.array([2.2747577830244, 1.1084709347794, 0.0]),
    165: np.array([2.2747577830244, 0.64152906522061, 0.0]),
    166: np.array([2.2747577830244, 1.3584709347794, 0.0]),
    167: np.array([2.9443697665109, 0.25626802195454, 0.0]),
    168: np.array([2.6943697665109, 0.25626802195454, 0.0]),
    169: np.array([2.6943697665109, 1.7437319780455, 0.0]),
    170: np.array([2.9443697665109, 1.7437319780455, 0.0]),
    171: np.array([2.6440960477351, 0.45599430317871, 0.0]),
    172: np.array([2.4497262812242, 0.19972628122416, 0.0]),
    173: np.array([2.4497262812242, 1.8002737187758, 0.0]),
    174: np.array([2.6440960477351, 1.5440056968213, 0.0]),
    175: np.array([2.4744840642486, 1.4087446535552, 0.0]),
    176: np.array([2.1997262812242, 1.5502737187758, 0.0]),
    177: np.array([2.1997262812242, 0.44972628122416, 0.0]),
    178: np.array([2.4744840642486, 0.59125534644477, 0.0]),
    179: np.array([3.8038178881397, 0.42391048114068, 0.0]),
    180: np.array([3.8038178881397, 0.17391048114068, 0.0]),
    181: np.array([3.8038178881397, 1.8260895188593, 0.0]),
    182: np.array([3.8038178881397, 1.5760895188593, 0.0]),
    183: np.array([2.1997262812242, 0.19972628122416, 0.0]),
    184: np.array([2.1997262812242, 1.8002737187758, 0.0]),
    185: np.array([3.5538178881397, 0.17391048114068, 0.0]),
    186: np.array([3.5538178881397, 1.8260895188593, 0.0]),
    187: np.array([3.1161674706984, 0.14822922577527, 0.0]),
    188: np.array([3.0605372372093, 0.40449724772982, 0.0]),
    189: np.array([3.0605372372093, 1.5955027522702, 0.0]),
    190: np.array([3.1161674706984, 1.8517707742247, 0.0]),
    191: np.array([3.5874787834867, 0.83747878348674, 0.0]),
    192: np.array([3.8374787834867, 0.83747878348674, 0.0]),
    193: np.array([3.75, 1.0, 0.0]),
    194: np.array([3.8374787834867, 1.1625212165133, 0.0]),
    195: np.array([3.5874787834867, 1.1625212165133, 0.0]),
    196: np.array([3.8374787834867, 1.4125212165133, 0.0]),
    197: np.array([3.8374787834867, 0.58747878348674, 0.0]),
    198: np.array([3.3661674706984, 0.14822922577527, 0.0]),
    199: np.array([3.3661674706984, 1.8517707742247, 0.0]),
    200: np.array([3.4933512339514, 1.3579790871303, 0.0]),
    201: np.array([3.4933512339514, 0.64202091286974, 0.0]),
    202: np.array([3.6412966716264, 0.51138926462742, 0.0]),
    203: np.array([3.6412966716264, 1.4886107353726, 0.0]),
    204: np.array([3.2720399211631, 1.5472286448417, 0.0]),
    205: np.array([3.2720399211631, 0.45277135515826, 0.0]),
    206: np.array([3.4199853588381, 1.677860293084, 0.0]),
    207: np.array([3.4199853588381, 0.32213970691595, 0.0]),
    208: np.array([3.4596903386044, 0.47845261052367, 0.0]),
    209: np.array([3.4596903386044, 1.5215473894763, 0.0]),
}
# Reference connectivity for the "pin 2" mesh: cell type -> {cell ID ->
# vertex IDs into pin_2_vertices}.  Cells 8-14 discretize the pin interior
# (see pin_2_cell_sets "MATERIAL_MOX"); cells 54-92 the surrounding region
# ("MATERIAL_WATER").  The ID numbering continues from pin_1_cells so the
# two meshes can be merged without renumbering.
pin_2_cells = {
    "triangle6": {
        8: np.array([73, 92, 72, 93, 94, 79]),
        9: np.array([74, 92, 73, 95, 93, 80]),
        10: np.array([75, 92, 74, 96, 95, 81]),
        11: np.array([72, 92, 71, 94, 97, 78]),
        12: np.array([76, 92, 75, 98, 96, 82]),
        13: np.array([71, 92, 8, 97, 99, 77]),
        14: np.array([8, 92, 76, 99, 98, 83]),
        54: np.array([24, 74, 73, 163, 80, 164]),
        55: np.array([24, 23, 74, 27, 165, 163]),
        56: np.array([24, 73, 25, 164, 166, 28]),
        57: np.array([50, 51, 75, 54, 167, 168]),
        58: np.array([66, 72, 65, 169, 170, 69]),
        59: np.array([75, 156, 50, 171, 172, 168]),
        60: np.array([66, 155, 72, 173, 174, 169]),
        61: np.array([73, 155, 25, 175, 176, 166]),
        62: np.array([23, 156, 74, 177, 178, 165]),
        63: np.array([57, 160, 6, 179, 180, 60]),
        64: np.array([7, 159, 59, 181, 182, 63]),
        65: np.array([2, 156, 23, 183, 177, 26]),
        66: np.array([50, 156, 2, 172, 183, 53]),
        67: np.array([25, 155, 4, 176, 184, 29]),
        68: np.array([4, 155, 66, 184, 173, 70]),
        69: np.array([72, 155, 73, 174, 175, 79]),
        70: np.array([74, 156, 75, 178, 171, 81]),
        71: np.array([6, 160, 52, 180, 185, 56]),
        72: np.array([64, 159, 7, 186, 181, 67]),
        73: np.array([51, 161, 75, 187, 188, 167]),
        74: np.array([72, 162, 65, 189, 190, 170]),
        75: np.array([8, 158, 58, 191, 192, 193]),
        76: np.array([58, 157, 8, 194, 195, 193]),
        77: np.array([59, 157, 58, 196, 194, 62]),
        78: np.array([58, 158, 57, 192, 197, 61]),
        79: np.array([52, 161, 51, 198, 187, 55]),
        80: np.array([65, 162, 64, 190, 199, 68]),
        81: np.array([8, 157, 71, 195, 200, 77]),
        82: np.array([76, 158, 8, 201, 191, 83]),
        83: np.array([158, 160, 57, 202, 179, 197]),
        84: np.array([59, 159, 157, 182, 203, 196]),
        85: np.array([71, 162, 72, 204, 189, 78]),
        86: np.array([75, 161, 76, 188, 205, 82]),
        87: np.array([64, 162, 159, 199, 206, 186]),
        88: np.array([160, 161, 52, 207, 198, 185]),
        89: np.array([76, 161, 160, 205, 207, 208]),
        90: np.array([159, 162, 71, 206, 204, 209]),
        91: np.array([76, 160, 158, 208, 202, 201]),
        92: np.array([157, 159, 71, 203, 209, 200]),
    }
}
# Cell sets for the "pin 2" mesh: material name -> IDs of the cells in
# pin_2_cells that carry that material.  Both sets are contiguous ID runs,
# so they are expressed as ranges: 8-14 fuel, 54-92 moderator.
pin_2_cell_sets = {
    "MATERIAL_MOX": np.arange(8, 15),
    "MATERIAL_WATER": np.arange(54, 93),
}
# Reference vertices for the combined two-pin mesh (pin 1 on [0, 2] x [0, 2],
# pin 2 on [2, 4] x [0, 2]): vertex ID -> [x, y, z] with z == 0 throughout.
# IDs 1-209 are contiguous; the entries reproduce the union of
# pin_1_vertices and pin_2_vertices with identical coordinates.
pin_1and2_vertices = {
    1: np.array([0.0, 0.0, 0.0]),
    2: np.array([2.0, 0.0, 0.0]),
    3: np.array([0.0, 2.0, 0.0]),
    4: np.array([2.0, 2.0, 0.0]),
    5: np.array([1.5, 1.0, 0.0]),
    6: np.array([4.0, 0.0, 0.0]),
    7: np.array([4.0, 2.0, 0.0]),
    8: np.array([3.5, 1.0, 0.0]),
    9: np.array([0.5, 0.0, 0.0]),
    10: np.array([1.0, 0.0, 0.0]),
    11: np.array([1.5, 0.0, 0.0]),
    12: np.array([0.25, 0.0, 0.0]),
    13: np.array([0.75, 0.0, 0.0]),
    14: np.array([1.25, 0.0, 0.0]),
    15: np.array([1.75, 0.0, 0.0]),
    16: np.array([0.0, 1.5, 0.0]),
    17: np.array([0.0, 1.0, 0.0]),
    18: np.array([0.0, 0.5, 0.0]),
    19: np.array([0.0, 1.75, 0.0]),
    20: np.array([0.0, 1.25, 0.0]),
    21: np.array([0.0, 0.75, 0.0]),
    22: np.array([0.0, 0.25, 0.0]),
    23: np.array([2.0, 0.5, 0.0]),
    24: np.array([2.0, 1.0, 0.0]),
    25: np.array([2.0, 1.5, 0.0]),
    26: np.array([2.0, 0.25, 0.0]),
    27: np.array([2.0, 0.75, 0.0]),
    28: np.array([2.0, 1.25, 0.0]),
    29: np.array([2.0, 1.75, 0.0]),
    30: np.array([1.5, 2.0, 0.0]),
    31: np.array([1.0, 2.0, 0.0]),
    32: np.array([0.5, 2.0, 0.0]),
    33: np.array([1.75, 2.0, 0.0]),
    34: np.array([1.25, 2.0, 0.0]),
    35: np.array([0.75, 2.0, 0.0]),
    36: np.array([0.25, 2.0, 0.0]),
    37: np.array([1.3117449009294, 1.390915741234, 0.0]),
    38: np.array([0.88873953302184, 1.4874639560909, 0.0]),
    39: np.array([0.54951556604879, 1.2169418695588, 0.0]),
    40: np.array([0.54951556604879, 0.78305813044122, 0.0]),
    41: np.array([0.88873953302184, 0.51253604390909, 0.0]),
    42: np.array([1.3117449009294, 0.60908425876599, 0.0]),
    43: np.array([1.4504844339512, 1.2169418695588, 0.0]),
    44: np.array([1.1112604669782, 1.4874639560909, 0.0]),
    45: np.array([0.68825509907063, 1.390915741234, 0.0]),
    46: np.array([0.5, 1.0, 0.0]),
    47: np.array([0.68825509907063, 0.60908425876599, 0.0]),
    48: np.array([1.1112604669782, 0.51253604390909, 0.0]),
    49: np.array([1.4504844339512, 0.78305813044122, 0.0]),
    50: np.array([2.5, 0.0, 0.0]),
    51: np.array([3.0, 0.0, 0.0]),
    52: np.array([3.5, 0.0, 0.0]),
    53: np.array([2.25, 0.0, 0.0]),
    54: np.array([2.75, 0.0, 0.0]),
    55: np.array([3.25, 0.0, 0.0]),
    56: np.array([3.75, 0.0, 0.0]),
    57: np.array([4.0, 0.5, 0.0]),
    58: np.array([4.0, 1.0, 0.0]),
    59: np.array([4.0, 1.5, 0.0]),
    60: np.array([4.0, 0.25, 0.0]),
    61: np.array([4.0, 0.75, 0.0]),
    62: np.array([4.0, 1.25, 0.0]),
    63: np.array([4.0, 1.75, 0.0]),
    64: np.array([3.5, 2.0, 0.0]),
    65: np.array([3.0, 2.0, 0.0]),
    66: np.array([2.5, 2.0, 0.0]),
    67: np.array([3.75, 2.0, 0.0]),
    68: np.array([3.25, 2.0, 0.0]),
    69: np.array([2.75, 2.0, 0.0]),
    70: np.array([2.25, 2.0, 0.0]),
    71: np.array([3.3117449009294, 1.390915741234, 0.0]),
    72: np.array([2.8887395330218, 1.4874639560909, 0.0]),
    73: np.array([2.5495155660488, 1.2169418695588, 0.0]),
    74: np.array([2.5495155660488, 0.78305813044122, 0.0]),
    75: np.array([2.8887395330218, 0.51253604390909, 0.0]),
    76: np.array([3.3117449009294, 0.60908425876599, 0.0]),
    77: np.array([3.4504844339512, 1.2169418695588, 0.0]),
    78: np.array([3.1112604669782, 1.4874639560909, 0.0]),
    79: np.array([2.6882550990706, 1.390915741234, 0.0]),
    80: np.array([2.5, 1.0, 0.0]),
    81: np.array([2.6882550990706, 0.60908425876599, 0.0]),
    82: np.array([3.1112604669782, 0.51253604390909, 0.0]),
    83: np.array([3.4504844339512, 0.78305813044122, 0.0]),
    84: np.array([1.0, 1.0, 0.0]),
    85: np.array([1.1558724504647, 1.195457870617, 0.0]),
    86: np.array([1.25, 1.0, 0.0]),
    87: np.array([0.94436976651092, 1.2437319780455, 0.0]),
    88: np.array([1.1558724504647, 0.80454212938299, 0.0]),
    89: np.array([0.7747577830244, 1.1084709347794, 0.0]),
    90: np.array([0.7747577830244, 0.89152906522061, 0.0]),
    91: np.array([0.94436976651092, 0.75626802195454, 0.0]),
    92: np.array([3.0, 1.0, 0.0]),
    93: np.array([2.7747577830244, 1.1084709347794, 0.0]),
    94: np.array([2.9443697665109, 1.2437319780455, 0.0]),
    95: np.array([2.7747577830244, 0.89152906522061, 0.0]),
    96: np.array([2.9443697665109, 0.75626802195454, 0.0]),
    97: np.array([3.1558724504647, 1.195457870617, 0.0]),
    98: np.array([3.1558724504647, 0.80454212938299, 0.0]),
    99: np.array([3.25, 1.0, 0.0]),
    100: np.array([0.39945256244833, 1.6005474375517, 0.0]),
    101: np.array([0.39945256244833, 0.39945256244833, 0.0]),
    102: np.array([1.6749575669735, 0.67495756697349, 0.0]),
    103: np.array([1.6749575669735, 1.3250424330265, 0.0]),
    104: np.array([1.6076357762794, 1.6521790377186, 0.0]),
    105: np.array([1.6076357762794, 0.34782096228136, 0.0]),
    106: np.array([1.2323349413967, 1.7035415484495, 0.0]),
    107: np.array([1.2323349413967, 0.29645845155054, 0.0]),
    108: np.array([0.2747577830244, 0.89152906522061, 0.0]),
    109: np.array([0.2747577830244, 1.1084709347794, 0.0]),
    110: np.array([0.2747577830244, 0.64152906522061, 0.0]),
    111: np.array([0.2747577830244, 1.3584709347794, 0.0]),
    112: np.array([0.94436976651092, 0.25626802195454, 0.0]),
    113: np.array([0.69436976651092, 0.25626802195454, 0.0]),
    114: np.array([0.69436976651092, 1.7437319780455, 0.0]),
    115: np.array([0.94436976651092, 1.7437319780455, 0.0]),
    116: np.array([0.44972628122416, 1.8002737187758, 0.0]),
    117: np.array([0.64409604773509, 1.5440056968213, 0.0]),
    118: np.array([0.64409604773509, 0.45599430317871, 0.0]),
    119: np.array([0.44972628122416, 0.19972628122416, 0.0]),
    120: np.array([0.47448406424856, 1.4087446535552, 0.0]),
    121: np.array([0.19972628122416, 1.5502737187758, 0.0]),
    122: np.array([0.19972628122416, 0.44972628122416, 0.0]),
    123: np.array([0.47448406424856, 0.59125534644477, 0.0]),
    124: np.array([1.8038178881397, 0.42391048114068, 0.0]),
    125: np.array([1.8038178881397, 0.17391048114068, 0.0]),
    126: np.array([1.8038178881397, 1.8260895188593, 0.0]),
    127: np.array([1.8038178881397, 1.5760895188593, 0.0]),
    128: np.array([0.19972628122416, 0.19972628122416, 0.0]),
    129: np.array([0.19972628122416, 1.8002737187758, 0.0]),
    130: np.array([1.5538178881397, 0.17391048114068, 0.0]),
    131: np.array([1.5538178881397, 1.8260895188593, 0.0]),
    132: np.array([1.1161674706984, 0.14822922577527, 0.0]),
    133: np.array([1.0605372372093, 0.40449724772982, 0.0]),
    134: np.array([1.0605372372093, 1.5955027522702, 0.0]),
    135: np.array([1.1161674706984, 1.8517707742247, 0.0]),
    136: np.array([1.5874787834867, 0.83747878348674, 0.0]),
    137: np.array([1.8374787834867, 0.83747878348674, 0.0]),
    138: np.array([1.75, 1.0, 0.0]),
    139: np.array([1.8374787834867, 1.1625212165133, 0.0]),
    140: np.array([1.5874787834867, 1.1625212165133, 0.0]),
    141: np.array([1.8374787834867, 1.4125212165133, 0.0]),
    142: np.array([1.8374787834867, 0.58747878348674, 0.0]),
    143: np.array([1.3661674706984, 0.14822922577527, 0.0]),
    144: np.array([1.3661674706984, 1.8517707742247, 0.0]),
    145: np.array([1.4933512339514, 0.64202091286974, 0.0]),
    146: np.array([1.4933512339514, 1.3579790871303, 0.0]),
    147: np.array([1.6412966716264, 0.51138926462742, 0.0]),
    148: np.array([1.6412966716264, 1.4886107353726, 0.0]),
    149: np.array([1.2720399211631, 0.45277135515826, 0.0]),
    150: np.array([1.2720399211631, 1.5472286448417, 0.0]),
    151: np.array([1.4199853588381, 1.677860293084, 0.0]),
    152: np.array([1.4199853588381, 0.32213970691595, 0.0]),
    153: np.array([1.4596903386044, 1.5215473894763, 0.0]),
    154: np.array([1.4596903386044, 0.47845261052367, 0.0]),
    155: np.array([2.3994525624483, 1.6005474375517, 0.0]),
    156: np.array([2.3994525624483, 0.39945256244833, 0.0]),
    157: np.array([3.6749575669735, 1.3250424330265, 0.0]),
    158: np.array([3.6749575669735, 0.67495756697349, 0.0]),
    159: np.array([3.6076357762794, 1.6521790377186, 0.0]),
    160: np.array([3.6076357762794, 0.34782096228136, 0.0]),
    161: np.array([3.2323349413967, 0.29645845155054, 0.0]),
    162: np.array([3.2323349413967, 1.7035415484495, 0.0]),
    163: np.array([2.2747577830244, 0.89152906522061, 0.0]),
    164: np.array([2.2747577830244, 1.1084709347794, 0.0]),
    165: np.array([2.2747577830244, 0.64152906522061, 0.0]),
    166: np.array([2.2747577830244, 1.3584709347794, 0.0]),
    167: np.array([2.9443697665109, 0.25626802195454, 0.0]),
    168: np.array([2.6943697665109, 0.25626802195454, 0.0]),
    169: np.array([2.6943697665109, 1.7437319780455, 0.0]),
    170: np.array([2.9443697665109, 1.7437319780455, 0.0]),
    171: np.array([2.6440960477351, 0.45599430317871, 0.0]),
    172: np.array([2.4497262812242, 0.19972628122416, 0.0]),
    173: np.array([2.4497262812242, 1.8002737187758, 0.0]),
    174: np.array([2.6440960477351, 1.5440056968213, 0.0]),
    175: np.array([2.4744840642486, 1.4087446535552, 0.0]),
    176: np.array([2.1997262812242, 1.5502737187758, 0.0]),
    177: np.array([2.1997262812242, 0.44972628122416, 0.0]),
    178: np.array([2.4744840642486, 0.59125534644477, 0.0]),
    179: np.array([3.8038178881397, 0.42391048114068, 0.0]),
    180: np.array([3.8038178881397, 0.17391048114068, 0.0]),
    181: np.array([3.8038178881397, 1.8260895188593, 0.0]),
    182: np.array([3.8038178881397, 1.5760895188593, 0.0]),
    183: np.array([2.1997262812242, 0.19972628122416, 0.0]),
    184: np.array([2.1997262812242, 1.8002737187758, 0.0]),
    185: np.array([3.5538178881397, 0.17391048114068, 0.0]),
    186: np.array([3.5538178881397, 1.8260895188593, 0.0]),
    187: np.array([3.1161674706984, 0.14822922577527, 0.0]),
    188: np.array([3.0605372372093, 0.40449724772982, 0.0]),
    189: np.array([3.0605372372093, 1.5955027522702, 0.0]),
    190: np.array([3.1161674706984, 1.8517707742247, 0.0]),
    191: np.array([3.5874787834867, 0.83747878348674, 0.0]),
    192: np.array([3.8374787834867, 0.83747878348674, 0.0]),
    193: np.array([3.75, 1.0, 0.0]),
    194: np.array([3.8374787834867, 1.1625212165133, 0.0]),
    195: np.array([3.5874787834867, 1.1625212165133, 0.0]),
    196: np.array([3.8374787834867, 1.4125212165133, 0.0]),
    197: np.array([3.8374787834867, 0.58747878348674, 0.0]),
    198: np.array([3.3661674706984, 0.14822922577527, 0.0]),
    199: np.array([3.3661674706984, 1.8517707742247, 0.0]),
    200: np.array([3.4933512339514, 1.3579790871303, 0.0]),
    201: np.array([3.4933512339514, 0.64202091286974, 0.0]),
    202: np.array([3.6412966716264, 0.51138926462742, 0.0]),
    203: np.array([3.6412966716264, 1.4886107353726, 0.0]),
    204: np.array([3.2720399211631, 1.5472286448417, 0.0]),
    205: np.array([3.2720399211631, 0.45277135515826, 0.0]),
    206: np.array([3.4199853588381, 1.677860293084, 0.0]),
    207: np.array([3.4199853588381, 0.32213970691595, 0.0]),
    208: np.array([3.4596903386044, 0.47845261052367, 0.0]),
    209: np.array([3.4596903386044, 1.5215473894763, 0.0]),
}
# Reference connectivity for the combined two-pin mesh: cell type ->
# {cell ID -> vertex IDs into pin_1and2_vertices}.  The entries are the
# union of pin_1_cells (IDs 1-7, 15-53) and pin_2_cells (IDs 8-14, 54-92)
# with identical connectivity, since both sub-meshes use the shared
# vertex numbering.
pin_1and2_cells = {
    "triangle6": {
        1: np.array([37, 84, 5, 85, 86, 43]),
        2: np.array([38, 84, 37, 87, 85, 44]),
        3: np.array([5, 84, 42, 86, 88, 49]),
        4: np.array([39, 84, 38, 89, 87, 45]),
        5: np.array([40, 84, 39, 90, 89, 46]),
        6: np.array([42, 84, 41, 88, 91, 48]),
        7: np.array([41, 84, 40, 91, 90, 47]),
        8: np.array([73, 92, 72, 93, 94, 79]),
        9: np.array([74, 92, 73, 95, 93, 80]),
        10: np.array([75, 92, 74, 96, 95, 81]),
        11: np.array([72, 92, 71, 94, 97, 78]),
        12: np.array([76, 92, 75, 98, 96, 82]),
        13: np.array([71, 92, 8, 97, 99, 77]),
        14: np.array([8, 92, 76, 99, 98, 83]),
        15: np.array([17, 40, 39, 108, 46, 109]),
        16: np.array([18, 40, 17, 110, 108, 21]),
        17: np.array([16, 17, 39, 20, 109, 111]),
        18: np.array([9, 10, 41, 13, 112, 113]),
        19: np.array([32, 38, 31, 114, 115, 35]),
        20: np.array([32, 100, 38, 116, 117, 114]),
        21: np.array([41, 101, 9, 118, 119, 113]),
        22: np.array([39, 100, 16, 120, 121, 111]),
        23: np.array([18, 101, 40, 122, 123, 110]),
        24: np.array([23, 105, 2, 124, 125, 26]),
        25: np.array([4, 104, 25, 126, 127, 29]),
        26: np.array([9, 101, 1, 119, 128, 12]),
        27: np.array([1, 101, 18, 128, 122, 22]),
        28: np.array([16, 100, 3, 121, 129, 19]),
        29: np.array([3, 100, 32, 129, 116, 36]),
        30: np.array([38, 100, 39, 117, 120, 45]),
        31: np.array([40, 101, 41, 123, 118, 47]),
        32: np.array([2, 105, 11, 125, 130, 15]),
        33: np.array([30, 104, 4, 131, 126, 33]),
        34: np.array([10, 107, 41, 132, 133, 112]),
        35: np.array([38, 106, 31, 134, 135, 115]),
        36: np.array([5, 102, 24, 136, 137, 138]),
        37: np.array([24, 103, 5, 139, 140, 138]),
        38: np.array([25, 103, 24, 141, 139, 28]),
        39: np.array([24, 102, 23, 137, 142, 27]),
        40: np.array([11, 107, 10, 143, 132, 14]),
        41: np.array([31, 106, 30, 135, 144, 34]),
        42: np.array([42, 102, 5, 145, 136, 49]),
        43: np.array([5, 103, 37, 140, 146, 43]),
        44: np.array([102, 105, 23, 147, 124, 142]),
        45: np.array([25, 104, 103, 127, 148, 141]),
        46: np.array([41, 107, 42, 133, 149, 48]),
        47: np.array([37, 106, 38, 150, 134, 44]),
        48: np.array([30, 106, 104, 144, 151, 131]),
        49: np.array([105, 107, 11, 152, 143, 130]),
        50: np.array([104, 106, 37, 151, 150, 153]),
        51: np.array([42, 107, 105, 149, 152, 154]),
        52: np.array([42, 105, 102, 154, 147, 145]),
        53: np.array([103, 104, 37, 148, 153, 146]),
        54: np.array([24, 74, 73, 163, 80, 164]),
        55: np.array([24, 23, 74, 27, 165, 163]),
        56: np.array([24, 73, 25, 164, 166, 28]),
        57: np.array([50, 51, 75, 54, 167, 168]),
        58: np.array([66, 72, 65, 169, 170, 69]),
        59: np.array([75, 156, 50, 171, 172, 168]),
        60: np.array([66, 155, 72, 173, 174, 169]),
        61: np.array([73, 155, 25, 175, 176, 166]),
        62: np.array([23, 156, 74, 177, 178, 165]),
        63: np.array([57, 160, 6, 179, 180, 60]),
        64: np.array([7, 159, 59, 181, 182, 63]),
        65: np.array([2, 156, 23, 183, 177, 26]),
        66: np.array([50, 156, 2, 172, 183, 53]),
        67: np.array([25, 155, 4, 176, 184, 29]),
        68: np.array([4, 155, 66, 184, 173, 70]),
        69: np.array([72, 155, 73, 174, 175, 79]),
        70: np.array([74, 156, 75, 178, 171, 81]),
        71: np.array([6, 160, 52, 180, 185, 56]),
        72: np.array([64, 159, 7, 186, 181, 67]),
        73: np.array([51, 161, 75, 187, 188, 167]),
        74: np.array([72, 162, 65, 189, 190, 170]),
        75: np.array([8, 158, 58, 191, 192, 193]),
        76: np.array([58, 157, 8, 194, 195, 193]),
        77: np.array([59, 157, 58, 196, 194, 62]),
        78: np.array([58, 158, 57, 192, 197, 61]),
        79: np.array([52, 161, 51, 198, 187, 55]),
        80: np.array([65, 162, 64, 190, 199, 68]),
        81: np.array([8, 157, 71, 195, 200, 77]),
        82: np.array([76, 158, 8, 201, 191, 83]),
        83: np.array([158, 160, 57, 202, 179, 197]),
        84: np.array([59, 159, 157, 182, 203, 196]),
        85: np.array([71, 162, 72, 204, 189, 78]),
        86: np.array([75, 161, 76, 188, 205, 82]),
        87: np.array([64, 162, 159, 199, 206, 186]),
        88: np.array([160, 161, 52, 207, 198, 185]),
        89: np.array([76, 161, 160, 205, 207, 208]),
        90: np.array([159, 162, 71, 206, 204, 209]),
        91: np.array([76, 160, 158, 208, 202, 201]),
        92: np.array([157, 159, 71, 203, 209, 200]),
    }
}
# Cell sets for the combined two-pin mesh (two-level grid hierarchy):
# set name -> cell IDs into pin_1and2_cells.  Every set is either a
# contiguous ID run or the concatenation of two runs, so the original
# one-ID-per-line literals are expressed as ranges:
#   fuel cells:  1-7 (UO2 pin), 8-14 (MOX pin)
#   water cells: 15-53 (around pin 1), 54-92 (around pin 2)
pin_1and2_cell_sets = {
    "MATERIAL_UO2": np.arange(1, 8),
    "MATERIAL_MOX": np.arange(8, 15),
    # Level-1 grid cell covering the whole domain: all 92 cells.
    "GRID_L1_1_1": np.arange(1, 93),
    # Level-2 grid cells: pin-1 half and pin-2 half of the domain.
    "GRID_L2_1_1": np.concatenate((np.arange(1, 8), np.arange(15, 54))),
    "GRID_L2_2_1": np.concatenate((np.arange(8, 15), np.arange(54, 93))),
    "MATERIAL_WATER": np.arange(15, 93),
}
# Cell sets for the combined two-pin mesh with a single grid level:
# set name -> cell IDs into pin_1and2_cells.  As in pin_1and2_cell_sets,
# each set is one or two contiguous ID runs, written here as ranges:
#   fuel cells:  1-7 (UO2 pin), 8-14 (MOX pin)
#   water cells: 15-53 (around pin 1), 54-92 (around pin 2)
pin_1and2_cell_sets_1_level = {
    "MATERIAL_UO2": np.arange(1, 8),
    "MATERIAL_MOX": np.arange(8, 15),
    # Level-1 grid cells: one per pin (pin-1 half, pin-2 half).
    "GRID_L1_1_1": np.concatenate((np.arange(1, 8), np.arange(15, 54))),
    "GRID_L1_2_1": np.concatenate((np.arange(8, 15), np.arange(54, 93))),
    "MATERIAL_WATER": np.arange(15, 93),
    # Per-pin sets: same memberships as the corresponding grid cells.
    "Pin_1": np.concatenate((np.arange(1, 8), np.arange(15, 54))),
    "Pin_2": np.concatenate((np.arange(8, 15), np.arange(54, 93))),
}
# Planar (x, y) coordinates for the three-level grid mesh.  All vertices lie
# in the z == 0 plane, so only the 2-D coordinates are tabulated and the
# third component is appended when the arrays are built below.
_three_level_grid_xy = {
    1: (0.0, 0.0), 2: (1.0, 0.0), 3: (1.0, 1.0), 4: (0.0, 1.0),
    5: (2.0, 0.0), 6: (2.0, 1.0), 7: (3.0, 0.0), 8: (3.0, 1.0),
    9: (4.0, 0.0), 10: (4.0, 1.0), 11: (1.0, 2.0), 12: (0.0, 2.0),
    13: (2.0, 2.0), 14: (3.0, 2.0), 15: (4.0, 2.0), 16: (1.0, 3.0),
    17: (0.0, 3.0), 18: (2.0, 3.0), 19: (3.0, 3.0), 20: (4.0, 3.0),
    21: (1.0, 4.0), 22: (0.0, 4.0), 23: (2.0, 4.0), 24: (3.0, 4.0),
    25: (4.0, 4.0), 26: (0.5, 0.0), 27: (1.0, 0.5), 28: (0.5, 1.0),
    29: (0.0, 0.5), 30: (1.5, 0.0), 31: (2.0, 0.5), 32: (1.5, 1.0),
    33: (1.0, 1.5), 34: (0.5, 2.0), 35: (0.0, 1.5), 36: (2.0, 1.5),
    37: (1.5, 2.0), 38: (0.5, 0.5), 39: (1.5, 0.5), 40: (2.5, 0.5),
    41: (0.5, 1.5), 42: (1.5, 1.5),
}
# Vertex ID -> [x, y, 0.0] array, matching the other *_vertices dicts.
three_level_grid_vertices = {
    vid: np.array([x, y, 0.0]) for vid, (x, y) in _three_level_grid_xy.items()
}
# Cell connectivity of the three-level grid fixture, grouped by cell type.
# Each inner mapping goes from global cell id to the ordered vertex ids of
# that cell: four vertices per quad, three per triangle.
three_level_grid_cells = {
    "quad": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            1: (26, 2, 27, 38),
            2: (27, 3, 28, 38),
            3: (1, 26, 38, 29),
            4: (4, 29, 38, 28),
            5: (30, 5, 31, 39),
            6: (31, 6, 32, 39),
            7: (2, 30, 39, 27),
            8: (3, 27, 39, 32),
            16: (28, 3, 33, 41),
            17: (33, 11, 34, 41),
            18: (4, 28, 41, 35),
            19: (12, 35, 41, 34),
            20: (32, 6, 36, 42),
            21: (36, 13, 37, 42),
            22: (3, 32, 42, 33),
            23: (11, 33, 42, 37),
        }.items()
    },
    "triangle": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            9: (7, 40, 5),
            10: (6, 40, 8),
            11: (8, 40, 7),
            12: (5, 40, 31),
            13: (31, 40, 6),
            14: (8, 7, 10),
            15: (7, 9, 10),
            24: (36, 8, 14),
            25: (6, 8, 36),
            26: (13, 36, 14),
            27: (14, 8, 10),
            28: (14, 10, 15),
            29: (17, 34, 16),
            30: (34, 11, 16),
            31: (17, 12, 34),
            32: (16, 37, 18),
            33: (16, 11, 37),
            34: (37, 13, 18),
            35: (13, 14, 18),
            36: (18, 14, 19),
            37: (19, 14, 15),
            38: (19, 15, 20),
            39: (22, 17, 16),
            40: (22, 16, 21),
            41: (16, 18, 21),
            42: (21, 18, 23),
            43: (23, 18, 19),
            44: (23, 19, 24),
            45: (19, 20, 24),
            46: (24, 20, 25),
        }.items()
    },
}
# Cell sets of the three-level grid fixture.  The level-1 set spans the
# whole mesh (cells 1-46), the level-2 sets partition it into quadrants,
# and the level-3 sets are the individual tiles inside each quadrant.
three_level_grid_cell_sets = {
    "GRID_L1_1_1": np.array(range(1, 47)),
    "GRID_L2_1_1": np.array((1, 2, 3, 4, 5, 6, 7, 8, 16, 17, 18, 19, 20, 21, 22, 23)),
    "GRID_L2_2_1": np.array((9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28)),
    "GRID_L2_1_2": np.array((29, 30, 31, 32, 33, 34, 39, 40, 41, 42)),
    "GRID_L2_2_2": np.array((35, 36, 37, 38, 43, 44, 45, 46)),
    "GRID_L3_1_1": np.array((1, 2, 3, 4)),
    "GRID_L3_2_1": np.array((5, 6, 7, 8)),
    "GRID_L3_3_1": np.array((9, 10, 11, 12, 13)),
    "GRID_L3_4_1": np.array((14, 15)),
    "GRID_L3_1_2": np.array((16, 17, 18, 19)),
    "GRID_L3_2_2": np.array((20, 21, 22, 23)),
    "GRID_L3_3_2": np.array((24, 25, 26)),
    "GRID_L3_4_2": np.array((27, 28)),
    "GRID_L3_1_3": np.array((29, 30, 31)),
    "GRID_L3_2_3": np.array((32, 33, 34)),
    "GRID_L3_3_3": np.array((35, 36)),
    "GRID_L3_4_3": np.array((37, 38)),
    "GRID_L3_1_4": np.array((39, 40)),
    "GRID_L3_2_4": np.array((41, 42)),
    "GRID_L3_3_4": np.array((43, 44)),
    "GRID_L3_4_4": np.array((45, 46)),
}
# Single-tile extracts of the three-level grid fixture.  Each pair of
# dictionaries describes one level-3 tile: the vertices it touches and its
# cell connectivity, keyed by the same global ids as the full
# three_level_grid_vertices / three_level_grid_cells tables.
three_level_l3_1_1_vertices = {
    vertex_id: np.array([x, y, 0.0])
    for vertex_id, (x, y) in {
        1: (0.0, 0.0),
        2: (1.0, 0.0),
        3: (1.0, 1.0),
        4: (0.0, 1.0),
        26: (0.5, 0.0),
        27: (1.0, 0.5),
        28: (0.5, 1.0),
        29: (0.0, 0.5),
        38: (0.5, 0.5),
    }.items()
}
three_level_l3_1_1_cells = {
    "quad": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            1: (26, 2, 27, 38),
            2: (27, 3, 28, 38),
            3: (1, 26, 38, 29),
            4: (4, 29, 38, 28),
        }.items()
    }
}
three_level_l3_1_2_vertices = {
    vertex_id: np.array([x, y, 0.0])
    for vertex_id, (x, y) in {
        3: (1.0, 1.0),
        4: (0.0, 1.0),
        11: (1.0, 2.0),
        12: (0.0, 2.0),
        28: (0.5, 1.0),
        33: (1.0, 1.5),
        34: (0.5, 2.0),
        35: (0.0, 1.5),
        41: (0.5, 1.5),
    }.items()
}
three_level_l3_1_2_cells = {
    "quad": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            16: (28, 3, 33, 41),
            17: (33, 11, 34, 41),
            18: (4, 28, 41, 35),
            19: (12, 35, 41, 34),
        }.items()
    }
}
three_level_l3_1_3_vertices = {
    vertex_id: np.array([x, y, 0.0])
    for vertex_id, (x, y) in {
        11: (1.0, 2.0),
        12: (0.0, 2.0),
        16: (1.0, 3.0),
        17: (0.0, 3.0),
        34: (0.5, 2.0),
    }.items()
}
three_level_l3_1_3_cells = {
    "triangle": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            29: (17, 34, 16),
            30: (34, 11, 16),
            31: (17, 12, 34),
        }.items()
    }
}
three_level_l3_3_3_vertices = {
    vertex_id: np.array([x, y, 0.0])
    for vertex_id, (x, y) in {
        13: (2.0, 2.0),
        14: (3.0, 2.0),
        18: (2.0, 3.0),
        19: (3.0, 3.0),
    }.items()
}
three_level_l3_3_3_cells = {
    "triangle": {
        cell_id: np.array(conn)
        for cell_id, conn in {
            35: (13, 14, 18),
            36: (18, 14, 19),
        }.items()
    }
}
| 34.940371
| 86
| 0.487816
| 11,249
| 71,488
| 3.081785
| 0.055205
| 0.234632
| 0.020942
| 0.009115
| 0.824531
| 0.804742
| 0.774598
| 0.742262
| 0.708743
| 0.708022
| 0
| 0.50053
| 0.301072
| 71,488
| 2,045
| 87
| 34.957457
| 0.193295
| 0.002126
| 0
| 0.730214
| 0
| 0
| 0.008342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.000498
| 0
| 0.000498
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0abb852d165fd6a726eb2025909ff7b53b450012
| 65,363
|
py
|
Python
|
spec/API_specification/signatures/elementwise_functions.py
|
tirthasheshpatel/array-api
|
3c241a257ae4145f06b379da658817db51a852c1
|
[
"MIT"
] | null | null | null |
spec/API_specification/signatures/elementwise_functions.py
|
tirthasheshpatel/array-api
|
3c241a257ae4145f06b379da658817db51a852c1
|
[
"MIT"
] | null | null | null |
spec/API_specification/signatures/elementwise_functions.py
|
tirthasheshpatel/array-api
|
3c241a257ae4145f06b379da658817db51a852c1
|
[
"MIT"
] | null | null | null |
from ._types import array
def abs(x: array, /) -> array:
    """
    Calculates the absolute value for each element ``x_i`` of the input array ``x`` (i.e., the element-wise result has the same magnitude as the respective element in ``x`` but has positive sign).

    .. note::
       For signed integer data types, the absolute value of the minimum representable integer is implementation-dependent.

    **Special Cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``-0``, the result is ``+0``.
    - If ``x_i`` is ``-infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the absolute value of each element in ``x``. The returned array must have the same data type as ``x``.
    """
def acos(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation of the principal value of the inverse cosine, having domain ``[-1, +1]`` and codomain ``[+0, +π]``, for each element ``x_i`` of the input array ``x``. Each element-wise result is expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is greater than ``1``, the result is ``NaN``.
    - If ``x_i`` is less than ``-1``, the result is ``NaN``.
    - If ``x_i`` is ``1``, the result is ``+0``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def acosh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the inverse hyperbolic cosine, having domain ``[+1, +infinity]`` and codomain ``[+0, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``1``, the result is ``NaN``.
    - If ``x_i`` is ``1``, the result is ``+0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse hyperbolic cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def add(x1: array, x2: array, /) -> array:
    """
    Calculates the sum for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    **Special cases**

    For floating-point operands,

    - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``-infinity``, the result is ``NaN``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``+infinity``, the result is ``NaN``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a finite number, the result is ``+infinity``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a finite number, the result is ``-infinity``.
    - If ``x1_i`` is a finite number and ``x2_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x1_i`` is a finite number and ``x2_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is ``-0``, the result is ``-0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is ``+0``, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is ``-0``, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is ``+0``, the result is ``+0``.
    - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is a nonzero finite number, the result is ``x2_i``.
    - If ``x1_i`` is a nonzero finite number and ``x2_i`` is either ``+0`` or ``-0``, the result is ``x1_i``.
    - If ``x1_i`` is a nonzero finite number and ``x2_i`` is ``-x1_i``, the result is ``+0``.
    - In the remaining cases, when neither ``infinity``, ``+0``, ``-0``, nor a ``NaN`` is involved, and the operands have the same mathematical sign or have different magnitudes, the sum must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported round mode. If the magnitude is too large to represent, the operation overflows and the result is an `infinity` of appropriate mathematical sign.

    .. note::
       Floating-point addition is a commutative operation, but not always associative.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise sums. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def asin(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation of the principal value of the inverse sine, having domain ``[-1, +1]`` and codomain ``[-π/2, +π/2]`` for each element ``x_i`` of the input array ``x``. Each element-wise result is expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is greater than ``1``, the result is ``NaN``.
    - If ``x_i`` is less than ``-1``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def asinh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the inverse hyperbolic sine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` in the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.

    Parameters
    ----------
    x: array
        input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse hyperbolic sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def atan(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation of the principal value of the inverse tangent, having domain ``[-infinity, +infinity]`` and codomain ``[-π/2, +π/2]``, for each element ``x_i`` of the input array ``x``. Each element-wise result is expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is an implementation-dependent approximation to ``+π/2``.
    - If ``x_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``-π/2``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def atan2(x1: array, x2: array, /) -> array:
    """
    Calculates an implementation-dependent approximation of the inverse tangent of the quotient ``x1/x2``, having domain ``[-infinity, +infinity] x [-infinity, +infinity]`` (where the ``x`` notation denotes the set of ordered pairs of elements ``(x1_i, x2_i)``) and codomain ``[-π, +π]``, for each pair of elements ``(x1_i, x2_i)`` of the input arrays ``x1`` and ``x2``, respectively. Each element-wise result is expressed in radians.

    The mathematical signs of ``x1_i`` and ``x2_i`` determine the quadrant of each element-wise result. The quadrant (i.e., branch) is chosen such that each element-wise result is the signed angle in radians between the ray ending at the origin and passing through the point ``(1,0)`` and the ray ending at the origin and passing through the point ``(x2_i, x1_i)``.

    .. note::
       Note the role reversal: the "y-coordinate" is the first function parameter; the "x-coordinate" is the second function parameter. The parameter order is intentional and traditional for the two-argument inverse tangent function where the y-coordinate argument is first and the x-coordinate argument is second.

    By IEEE 754 convention, the inverse tangent of the quotient ``x1/x2`` is defined for ``x2_i`` equal to positive or negative zero and for either or both of ``x1_i`` and ``x2_i`` equal to positive or negative ``infinity``.

    **Special cases**

    For floating-point operands,

    - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is an implementation-dependent approximation to ``+π/2``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is an implementation-dependent approximation to ``+π/2``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is ``+0``, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is ``-0``, the result is an implementation-dependent approximation to ``+π``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is an implementation-dependent approximation to ``+π``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is ``+0``, the result is ``-0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is ``-0``, the result is an implementation-dependent approximation to ``-π``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is an implementation-dependent approximation to ``-π``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is an implementation-dependent approximation to ``-π/2``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is an implementation-dependent approximation to ``-π/2``.
    - If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``+infinity``, the result is ``+0``.
    - If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``+π``.
    - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``+infinity``, the result is ``-0``.
    - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``-π``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is finite, the result is an implementation-dependent approximation to ``+π/2``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is finite, the result is an implementation-dependent approximation to ``-π/2``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``+infinity``, the result is an implementation-dependent approximation to ``+π/4``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``+3π/4``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``+infinity``, the result is an implementation-dependent approximation to ``-π/4``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``-3π/4``.

    Parameters
    ----------
    x1: array
        input array corresponding to the y-coordinates. Should have a floating-point data type.
    x2: array
        input array corresponding to the x-coordinates. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse tangent of the quotient ``x1/x2``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def atanh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the inverse hyperbolic tangent, having domain ``[-1, +1]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``-1``, the result is ``NaN``.
    - If ``x_i`` is greater than ``1``, the result is ``NaN``.
    - If ``x_i`` is ``-1``, the result is ``-infinity``.
    - If ``x_i`` is ``+1``, the result is ``+infinity``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.

    Parameters
    ----------
    x: array
        input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the inverse hyperbolic tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def bitwise_and(x1: array, x2: array, /) -> array:
    """
    Computes the bitwise AND of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have an integer or boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def bitwise_left_shift(x1: array, x2: array, /) -> array:
    """
    Shifts the bits of each element ``x1_i`` of the input array ``x1`` to the left by appending ``x2_i`` (i.e., the respective element in the input array ``x2``) zeros to the right of ``x1_i``.

    Parameters
    ----------
    x1: array
        first input array. Should have an integer data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer data type. Each element must be greater than or equal to ``0``.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def bitwise_invert(x: array, /) -> array:
    """
    Inverts (flips) each bit for each element ``x_i`` of the input array ``x``.

    Parameters
    ----------
    x: array
        input array. Should have an integer or boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have the same data type as ``x``.
    """
def bitwise_or(x1: array, x2: array, /) -> array:
    """
    Computes the bitwise OR of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have an integer or boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def bitwise_right_shift(x1: array, x2: array, /) -> array:
    """
    Shifts the bits of each element ``x1_i`` of the input array ``x1`` to the right according to the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       This operation must be an arithmetic shift (i.e., sign-propagating) and thus equivalent to floor division by a power of two.

    Parameters
    ----------
    x1: array
        first input array. Should have an integer data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer data type. Each element must be greater than or equal to ``0``.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def bitwise_xor(x1: array, x2: array, /) -> array:
    """
    Computes the bitwise XOR of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have an integer or boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def ceil(x: array, /) -> array:
    """
    Rounds each element ``x_i`` of the input array ``x`` to the smallest (i.e., closest to ``-infinity``) integer-valued number that is not less than ``x_i``.

    **Special cases**

    - If ``x_i`` is already integer-valued, the result is ``x_i``.

    For floating-point operands,

    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``NaN``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
def cos(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the cosine, having domain ``(-infinity, +infinity)`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``1``.
    - If ``x_i`` is ``-0``, the result is ``1``.
    - If ``x_i`` is ``+infinity``, the result is ``NaN``.
    - If ``x_i`` is ``-infinity``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array whose elements are each expressed in radians. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def cosh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the hyperbolic cosine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` in the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``1``.
    - If ``x_i`` is ``-0``, the result is ``1``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array whose elements each represent a hyperbolic angle. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the hyperbolic cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def divide(x1: array, x2: array, /) -> array:
    """
    Calculates the division for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       If one or both of the input arrays have integer data types, the result is implementation-dependent, as type promotion between data type "kinds" (e.g., integer versus floating-point) is unspecified.

       Specification-compliant libraries may choose to raise an error or return an array containing the element-wise results. If an array is returned, the array must have a floating-point data type.

    **Special cases**

    For floating-point operands,

    - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
    - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``-0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is ``+0``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is ``+infinity``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is ``-infinity``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is ``-infinity``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is ``+infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``.
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``.
    - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``+0``.
    - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``-0``.
    - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``-0``.
    - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``+0``.
    - If ``x1_i`` and ``x2_i`` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign.
    - If ``x1_i`` and ``x2_i`` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign.
    - In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the operation overflows and the result is an ``infinity`` of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign.

    Parameters
    ----------
    x1: array
        dividend input array. Should have a numeric data type.
    x2: array
        divisor input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def equal(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i == x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. May have any data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). May have any data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def exp(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the exponential function, having domain ``[-infinity, +infinity]`` and codomain ``[+0, +infinity]``, for each element ``x_i`` of the input array ``x`` (``e`` raised to the power of ``x_i``, where ``e`` is the base of the natural logarithm).

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``1``.
    - If ``x_i`` is ``-0``, the result is ``1``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``+0``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the evaluated exponential function result for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def expm1(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to ``exp(x)-1``, having domain ``[-infinity, +infinity]`` and codomain ``[-1, +infinity]``, for each element ``x_i`` of the input array ``x``.

    .. note::
       The purpose of this function is to calculate ``exp(x)-1.0`` more accurately when `x` is close to zero. Accordingly, conforming implementations should avoid implementing this function as simply ``exp(x)-1.0``. See FDLIBM, or some other IEEE 754-2019 compliant mathematical library, for a potential reference implementation.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-1``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def floor(x: array, /) -> array:
    """
    Rounds each element ``x_i`` of the input array ``x`` to the greatest (i.e., closest to ``+infinity``) integer-valued number that is not greater than ``x_i``.

    **Special cases**

    - If ``x_i`` is already integer-valued, the result is ``x_i``.

    For floating-point operands,

    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``NaN``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
def floor_divide(x1: array, x2: array, /) -> array:
    """
    Rounds the result of dividing each element ``x1_i`` of the input array ``x1`` by the respective element ``x2_i`` of the input array ``x2`` to the greatest (i.e., closest to `+infinity`) integer-valued number that is not greater than the division result.

    .. note::
       For input arrays which promote to an integer data type, the result of division by zero is unspecified and thus implementation-defined.

    **Special cases**

    .. note::
       Floor division was introduced in Python via `PEP 238 <https://www.python.org/dev/peps/pep-0238/>`_ with the goal to disambiguate "true division" (i.e., computing an approximation to the mathematical operation of division) from "floor division" (i.e., rounding the result of division toward negative infinity). The former was computed when one of the operands was a ``float``, while the latter was computed when both operands were ``int``\s. Overloading the ``/`` operator to support both behaviors led to subtle numerical bugs when integers are possible, but not expected.

       To resolve this ambiguity, ``/`` was designated for true division, and ``//`` was designated for floor division. Semantically, floor division was `defined <https://www.python.org/dev/peps/pep-0238/#semantics-of-floor-division>`_ as equivalent to ``a // b == floor(a/b)``; however, special floating-point cases were left ill-defined.

       Accordingly, floor division is not implemented consistently across array libraries for some of the special cases documented below. Namely, when one of the operands is ``infinity``, libraries may diverge with some choosing to strictly follow ``floor(a/b)`` and others choosing to pair ``//`` with ``%`` according to the relation ``b = a % b + b * (a // b)``. The special cases leading to divergent behavior are documented below.

       This specification prefers floor division to match ``floor(divide(x1, x2))`` in order to avoid surprising and unexpected results; however, array libraries may choose to more strictly follow Python behavior.

    For floating-point operands,

    - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
    - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``-0``.
    - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is ``+0``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is ``+infinity``.
    - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is ``-infinity``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is ``-infinity``.
    - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is ``+infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.)
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.)
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.)
    - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.)
    - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``+0``.
    - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``-0``. (**note**: libraries may return ``-1.0`` to match Python behavior.)
    - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``-0``. (**note**: libraries may return ``-1.0`` to match Python behavior.)
    - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``+0``.
    - If ``x1_i`` and ``x2_i`` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign.
    - If ``x1_i`` and ``x2_i`` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign.
    - In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the greatest (i.e., closest to `+infinity`) representable integer-valued number that is not greater than the division result. If the magnitude is too large to represent, the operation overflows and the result is an ``infinity`` of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign.

    Parameters
    ----------
    x1: array
        dividend input array. Should have a numeric data type.
    x2: array
        divisor input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def greater(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i > x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def greater_equal(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i >= x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def isfinite(x: array, /) -> array:
    """
    Tests each element ``x_i`` of the input array ``x`` to determine if finite (i.e., not ``NaN`` and not equal to positive or negative infinity).

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is finite and ``False`` otherwise. The returned array must have a data type of ``bool``.
    """
def isinf(x: array, /) -> array:
    """
    Tests each element ``x_i`` of the input array ``x`` to determine if equal to positive or negative infinity.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is either positive or negative infinity and ``False`` otherwise. The returned array must have a data type of ``bool``.
    """
def isnan(x: array, /) -> array:
    """
    Tests each element ``x_i`` of the input array ``x`` to determine whether the element is ``NaN``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is ``NaN`` and ``False`` otherwise. The returned array must have a data type of ``bool``.
    """
def less(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i < x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def less_equal(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i <= x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def log(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the natural (base ``e``) logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``0``, the result is ``NaN``.
    - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``.
    - If ``x_i`` is ``1``, the result is ``+0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the evaluated natural logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def log1p(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to ``log(1+x)``, where ``log`` refers to the natural (base ``e``) logarithm, having domain ``[-1, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    .. note::
       The purpose of this function is to calculate ``log(1+x)`` more accurately when `x` is close to zero. Accordingly, conforming implementations should avoid implementing this function as simply ``log(1+x)``. See FDLIBM, or some other IEEE 754-2019 compliant mathematical library, for a potential reference implementation.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``-1``, the result is ``NaN``.
    - If ``x_i`` is ``-1``, the result is ``-infinity``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def log2(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the base ``2`` logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``0``, the result is ``NaN``.
    - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``.
    - If ``x_i`` is ``1``, the result is ``+0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the evaluated base ``2`` logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def log10(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the base ``10`` logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``0``, the result is ``NaN``.
    - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``.
    - If ``x_i`` is ``1``, the result is ``+0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the evaluated base ``10`` logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def logaddexp(x1: array, x2: array) -> array:
"""
Calculates the logarithm of the sum of exponentiations ``log(exp(x1) + exp(x2))`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.
**Special cases**
For floating-point operands,
- If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
- If ``x1_i`` is ``+infinity`` and ``x2_i`` is not ``NaN``, the result is ``+infinity``.
- If ``x1_i`` is not ``NaN`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``.
Parameters
----------
x1: array
first input array. Should have a floating-point data type.
x2: array
second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a floating-point data type.
Returns
-------
out: array
an array containing the element-wise results. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
"""
def logical_and(x1: array, x2: array, /) -> array:
    """
    Computes the logical AND for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``.

    Parameters
    ----------
    x1: array
        first input array. Should have a boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def logical_not(x: array, /) -> array:
    """
    Computes the logical NOT for each element ``x_i`` of the input array ``x``.

    .. note::
       While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``.

    Parameters
    ----------
    x: array
        input array. Should have a boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def logical_or(x1: array, x2: array, /) -> array:
    """
    Computes the logical OR for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``.

    Parameters
    ----------
    x1: array
        first input array. Should have a boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def logical_xor(x1: array, x2: array, /) -> array:
    """
    Computes the logical XOR for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``.

    Parameters
    ----------
    x1: array
        first input array. Should have a boolean data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a boolean data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def multiply(x1: array, x2: array, /) -> array:
    """
    Calculates the product for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    **Special cases**

    For floating-point operands,

    - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``.
    - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
    - If ``x1_i`` and ``x2_i`` have the same mathematical sign, the result has a positive mathematical sign, unless the result is ``NaN``. If the result is ``NaN``, the "sign" of ``NaN`` is implementation-defined.
    - If ``x1_i`` and ``x2_i`` have different mathematical signs, the result has a negative mathematical sign, unless the result is ``NaN``. If the result is ``NaN``, the "sign" of ``NaN`` is implementation-defined.
    - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is a signed infinity with the mathematical sign determined by the rule already stated above.
    - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is a nonzero finite number, the result is a signed infinity with the mathematical sign determined by the rule already stated above.
    - If ``x1_i`` is a nonzero finite number and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is a signed infinity with the mathematical sign determined by the rule already stated above.
    - In the remaining cases, where neither ``infinity`` nor ``NaN`` is involved, the product must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the result is an `infinity` of appropriate mathematical sign. If the magnitude is too small to represent, the result is a zero of appropriate mathematical sign.

    .. note::
       Floating-point multiplication is not always associative due to finite precision.

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise products. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def negative(x: array, /) -> array:
    """
    Computes the numerical negative of each element ``x_i`` (i.e., ``y_i = -x_i``) of the input array ``x``.

    .. note::
       For signed integer data types, the numerical negative of the minimum representable integer is implementation-dependent.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def not_equal(x1: array, x2: array, /) -> array:
    """
    Computes the truth value of ``x1_i != x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``.

    Parameters
    ----------
    x1: array
        first input array. May have any data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`).

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type of ``bool``.
    """
def positive(x: array, /) -> array:
    """
    Computes the numerical positive of each element ``x_i`` (i.e., ``y_i = +x_i``) of the input array ``x``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
def pow(x1: array, x2: array, /) -> array:
    """
    Calculates an implementation-dependent approximation of exponentiation by raising each element ``x1_i`` (the base) of the input array ``x1`` to the power of ``x2_i`` (the exponent), where ``x2_i`` is the corresponding element of the input array ``x2``.

    .. note::
       If both ``x1`` and ``x2`` have integer data types, the result of ``pow`` when ``x2_i`` is negative (i.e., less than zero) is unspecified and thus implementation-dependent.

       If ``x1`` has an integer data type and ``x2`` has a floating-point data type, behavior is implementation-dependent (type promotion between data type "kinds" (integer versus floating-point) is unspecified).

    **Special cases**

    For floating-point operands,

    - If ``x1_i`` is not equal to ``1`` and ``x2_i`` is ``NaN``, the result is ``NaN``.
    - If ``x2_i`` is ``+0``, the result is ``1``, even if ``x1_i`` is ``NaN``.
    - If ``x2_i`` is ``-0``, the result is ``1``, even if ``x1_i`` is ``NaN``.
    - If ``x1_i`` is ``NaN`` and ``x2_i`` is not equal to ``0``, the result is ``NaN``.
    - If ``abs(x1_i)`` is greater than ``1`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``abs(x1_i)`` is greater than ``1`` and ``x2_i`` is ``-infinity``, the result is ``+0``.
    - If ``abs(x1_i)`` is ``1`` and ``x2_i`` is ``+infinity``, the result is ``1``.
    - If ``abs(x1_i)`` is ``1`` and ``x2_i`` is ``-infinity``, the result is ``1``.
    - If ``x1_i`` is ``1`` and ``x2_i`` is not ``NaN``, the result is ``1``.
    - If ``abs(x1_i)`` is less than ``1`` and ``x2_i`` is ``+infinity``, the result is ``+0``.
    - If ``abs(x1_i)`` is less than ``1`` and ``x2_i`` is ``-infinity``, the result is ``+infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is greater than ``0``, the result is ``+infinity``.
    - If ``x1_i`` is ``+infinity`` and ``x2_i`` is less than ``0``, the result is ``+0``.
    - If ``x1_i`` is ``-infinity``, ``x2_i`` is greater than ``0``, and ``x2_i`` is an odd integer value, the result is ``-infinity``.
    - If ``x1_i`` is ``-infinity``, ``x2_i`` is greater than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+infinity``.
    - If ``x1_i`` is ``-infinity``, ``x2_i`` is less than ``0``, and ``x2_i`` is an odd integer value, the result is ``-0``.
    - If ``x1_i`` is ``-infinity``, ``x2_i`` is less than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``.
    - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``+infinity``.
    - If ``x1_i`` is ``-0``, ``x2_i`` is greater than ``0``, and ``x2_i`` is an odd integer value, the result is ``-0``.
    - If ``x1_i`` is ``-0``, ``x2_i`` is greater than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+0``.
    - If ``x1_i`` is ``-0``, ``x2_i`` is less than ``0``, and ``x2_i`` is an odd integer value, the result is ``-infinity``.
    - If ``x1_i`` is ``-0``, ``x2_i`` is less than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+infinity``.
    - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, ``x2_i`` is a finite number, and ``x2_i`` is not an integer value, the result is ``NaN``.

    Parameters
    ----------
    x1: array
        first input array whose elements correspond to the exponentiation base. Should have a numeric data type.
    x2: array
        second input array whose elements correspond to the exponentiation exponent. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def remainder(x1: array, x2: array, /) -> array:
    """
    Returns the remainder of division for each element ``x1_i`` of the input array ``x1`` and the respective element ``x2_i`` of the input array ``x2``.

    .. note::
       For input arrays which promote to an integer data type, the result of division by zero is unspecified and thus implementation-defined.

    Parameters
    ----------
    x1: array
        dividend input array. Should have a numeric data type.
    x2: array
        divisor input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise results. Each element-wise result must have the same sign as the respective element ``x2_i``. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def round(x: array, /) -> array:
    """
    Rounds each element ``x_i`` of the input array ``x`` to the nearest integer-valued number.

    **Special cases**

    - If ``x_i`` is already integer-valued, the result is ``x_i``.

    For floating-point operands,

    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If two integers are equally close to ``x_i``, the result is the even integer closest to ``x_i``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
def sign(x: array, /) -> array:
    """
    Returns an indication of the sign of a number for each element ``x_i`` of the input array ``x``.

    **Special cases**

    - If ``x_i`` is less than ``0``, the result is ``-1``.
    - If ``x_i`` is either ``-0`` or ``+0``, the result is ``0``.
    - If ``x_i`` is greater than ``0``, the result is ``+1``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
def sin(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the sine, having domain ``(-infinity, +infinity)`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array whose elements are each expressed in radians. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def sinh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the hyperbolic sine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.

    Parameters
    ----------
    x: array
        input array whose elements each represent a hyperbolic angle. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the hyperbolic sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def square(x: array, /) -> array:
    """
    Squares (``x_i * x_i``) each element ``x_i`` of the input array ``x``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the evaluated result for each element in ``x``. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def sqrt(x: array, /) -> array:
    """
    Calculates the square root, having domain ``[0, +infinity]`` and codomain ``[0, +infinity]``, for each element ``x_i`` of the input array ``x``. After rounding, each result must be indistinguishable from the infinitely precise result (as required by IEEE 754).

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is less than ``0``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.

    Parameters
    ----------
    x: array
        input array. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the square root of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def subtract(x1: array, x2: array, /) -> array:
    """
    Calculates the difference for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. The result of ``x1_i - x2_i`` must be the same as ``x1_i + (-x2_i)`` and must be governed by the same floating-point rules as addition (see :meth:`add`).

    Parameters
    ----------
    x1: array
        first input array. Should have a numeric data type.
    x2: array
        second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the element-wise differences. The returned array must have a data type determined by :ref:`type-promotion`.
    """
def tan(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the tangent, having domain ``(-infinity, +infinity)`` and codomain ``(-infinity, +infinity)``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array whose elements are expressed in radians. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def tanh(x: array, /) -> array:
    """
    Calculates an implementation-dependent approximation to the hyperbolic tangent, having domain ``[-infinity, +infinity]`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``.

    **Special cases**

    For floating-point operands,

    - If ``x_i`` is ``NaN``, the result is ``NaN``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``+infinity``, the result is ``+1``.
    - If ``x_i`` is ``-infinity``, the result is ``-1``.

    Parameters
    ----------
    x: array
        input array whose elements each represent a hyperbolic angle. Should have a floating-point data type.

    Returns
    -------
    out: array
        an array containing the hyperbolic tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
    """
def trunc(x: array, /) -> array:
    """
    Rounds each element ``x_i`` of the input array ``x`` to the integer-valued number that is closest to but no greater than ``x_i``.

    **Special cases**

    - If ``x_i`` is already integer-valued, the result is ``x_i``.

    For floating-point operands,

    - If ``x_i`` is ``+infinity``, the result is ``+infinity``.
    - If ``x_i`` is ``-infinity``, the result is ``-infinity``.
    - If ``x_i`` is ``+0``, the result is ``+0``.
    - If ``x_i`` is ``-0``, the result is ``-0``.
    - If ``x_i`` is ``NaN``, the result is ``NaN``.

    Parameters
    ----------
    x: array
        input array. Should have a numeric data type.

    Returns
    -------
    out: array
        an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``.
    """
# Public names exported by this element-wise-functions module.
__all__ = ['abs', 'acos', 'acosh', 'add', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'bitwise_and', 'bitwise_left_shift', 'bitwise_invert', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'ceil', 'cos', 'cosh', 'divide', 'equal', 'exp', 'expm1', 'floor', 'floor_divide', 'greater', 'greater_equal', 'isfinite', 'isinf', 'isnan', 'less', 'less_equal', 'log', 'log1p', 'log2', 'log10', 'logaddexp', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'multiply', 'negative', 'not_equal', 'positive', 'pow', 'remainder', 'round', 'sign', 'sin', 'sinh', 'square', 'sqrt', 'subtract', 'tan', 'tanh', 'trunc']
| 48.025716
| 614
| 0.620932
| 9,709
| 65,363
| 4.124112
| 0.041302
| 0.027197
| 0.067855
| 0.01903
| 0.89713
| 0.885667
| 0.869808
| 0.854349
| 0.83377
| 0.821883
| 0
| 0.017973
| 0.218549
| 65,363
| 1,361
| 614
| 48.025716
| 0.765946
| 0.886603
| 0
| 0
| 0
| 0
| 0.118787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.965517
| false
| 0
| 0.017241
| 0
| 0.982759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
0aef5ae84bf16c225fcd272bf1b12f4b6255d853
| 2,408
|
py
|
Python
|
api/migrations/0002_auto_20200621_1340.py
|
UsernameForGerman/tiktune
|
5e888a962b80a55c057b71e6e2f80f2a9cfbec12
|
[
"Apache-2.0"
] | null | null | null |
api/migrations/0002_auto_20200621_1340.py
|
UsernameForGerman/tiktune
|
5e888a962b80a55c057b71e6e2f80f2a9cfbec12
|
[
"Apache-2.0"
] | 5
|
2021-03-30T13:48:21.000Z
|
2021-09-22T19:22:03.000Z
|
api/migrations/0002_auto_20200621_1340.py
|
UsernameForGerman/tiktune
|
5e888a962b80a55c057b71e6e2f80f2a9cfbec12
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-21 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment); do not hand-edit logic.
    # Alters the five external-service id fields (deezer/itunes/musicstory/
    # play/spotify) on both Artist and Song to SlugField(blank=True,
    # max_length=512, null=True, unique=True).

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='artist',
            name='deezer_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='deezer id'),
        ),
        migrations.AlterField(
            model_name='artist',
            name='itunes_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='apple id'),
        ),
        migrations.AlterField(
            model_name='artist',
            name='musicstory_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='musicstory id'),
        ),
        migrations.AlterField(
            model_name='artist',
            name='play_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='play id'),
        ),
        migrations.AlterField(
            model_name='artist',
            name='spotify_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='spotify id'),
        ),
        migrations.AlterField(
            model_name='song',
            name='deezer_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='deezer id'),
        ),
        migrations.AlterField(
            model_name='song',
            name='itunes_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='apple id'),
        ),
        migrations.AlterField(
            model_name='song',
            name='musicstory_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='musicstory id'),
        ),
        migrations.AlterField(
            model_name='song',
            name='play_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='play id'),
        ),
        migrations.AlterField(
            model_name='song',
            name='spotify_id',
            field=models.SlugField(blank=True, max_length=512, null=True, unique=True, verbose_name='spotify id'),
        ),
    ]
| 37.625
| 117
| 0.593023
| 267
| 2,408
| 5.194757
| 0.172285
| 0.144196
| 0.180245
| 0.209084
| 0.901226
| 0.901226
| 0.875991
| 0.82336
| 0.82336
| 0.82336
| 0
| 0.028258
| 0.2799
| 2,408
| 63
| 118
| 38.222222
| 0.771626
| 0.018688
| 0
| 0.877193
| 1
| 0
| 0.108005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017544
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
7c49aaf24822124c66feca9a476e3ad17c7fb62a
| 22,538
|
py
|
Python
|
db.py
|
OpenRobot/MS-Counter
|
a58501a02d13716f119c3eb5175e7a3ee9040fee
|
[
"MIT"
] | null | null | null |
db.py
|
OpenRobot/MS-Counter
|
a58501a02d13716f119c3eb5175e7a3ee9040fee
|
[
"MIT"
] | null | null | null |
db.py
|
OpenRobot/MS-Counter
|
a58501a02d13716f119c3eb5175e7a3ee9040fee
|
[
"MIT"
] | null | null | null |
import asyncpg, asyncio, json, time, random
class RomanDatabase:
    """Postgres-backed store for the roman-numeral counting channel.

    Holds a single-row ``counter`` table (current count) and a ``counter_lb``
    leaderboard table keyed by user id. All queries retry forever on
    ``asyncpg`` ``InterfaceError`` with a 5-second back-off.
    """

    def __init__(self, loop, db_uri):
        self.uri = str(db_uri)
        self.db = None  # asyncpg pool, set by initialize()
        loop.run_until_complete(self.initialize())
        self.is_modifying = False  # True while set_number() is in flight

    def wait_until_not_modifying(self):
        """Spin until no write is in progress.

        NOTE(review): this busy-waits on the event-loop thread, so while it
        spins the loop cannot run the writer that would clear the flag.
        Kept only for backward compatibility; readers now yield to the loop
        instead (see get_current_number).
        """
        while self.is_modifying:
            pass
        return

    async def initialize(self):
        """Create the connection pool and ensure both tables exist.

        Returns the pool so callers may chain on it.
        """
        while True:
            try:
                self.db = await asyncpg.create_pool(self.uri, max_size=20, min_size=5)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS counter(
                            count BIGINT DEFAULT 0
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM counter")
                    if not x:
                        # Seed the single counter row on first run.
                        await conn.execute("INSERT INTO counter VALUES ($1)", 0)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS counter_lb(
                            user_id BIGINT,
                            counts BIGINT DEFAULT 0,
                            recent_counts JSONB NOT NULL DEFAULT '[]'::jsonb
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        return self.db

    async def get_current_number(self) -> int:
        """Return the current count, re-initializing if the row is missing."""
        while True:
            # Yield to the event loop while a write is in progress; the old
            # synchronous spin-wait could deadlock the loop.
            while self.is_modifying:
                await asyncio.sleep(0)
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM counter")
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        if x is None:
            await self.initialize()
            return await self.get_current_number()
        else:
            return x['count']

    async def set_number(self, author, msg, num: int):
        """Set the counter to ``num`` and record the count for ``author``.

        ``author``/``msg`` are expected to expose discord.py-style attributes
        (``id``, ``jump_url``, ``channel.id``, ``created_at``). Returns ``num``
        as an int.
        """
        self.is_modifying = True
        try:
            while True:
                try:
                    async with self.db.acquire() as conn:
                        await conn.execute("UPDATE counter SET count = $1", int(num))
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
            while True:
                try:
                    async with self.db.acquire() as conn:
                        x = await conn.fetchrow("SELECT * FROM counter_lb WHERE user_id = $1", int(author.id))
                        entry = {
                            'num': int(num),
                            'message_id': msg.id,
                            'message_url': msg.jump_url,
                            'channel_id': msg.channel.id,
                            'timestamp': msg.created_at.timestamp()
                        }
                        if x:
                            recent = json.loads(x['recent_counts']) + [entry]
                            await conn.execute(
                                "UPDATE counter_lb SET counts = $2, recent_counts = $3 WHERE user_id = $1",
                                int(author.id),
                                len(recent),
                                json.dumps(recent)
                            )
                        else:
                            await conn.execute(
                                "INSERT INTO counter_lb VALUES ($1, $2, $3)",
                                int(author.id),
                                1,
                                json.dumps([entry])
                            )
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        finally:
            # Always clear the flag, even on unexpected errors; otherwise
            # readers would stall forever.
            self.is_modifying = False
        return int(num)
class BinaryDatabase:
    """Postgres-backed store for the binary counting channel.

    Mirrors RomanDatabase but uses the ``binary_counter``/``binary_counter_lb``
    tables and can reuse an existing pool via ``db_obj``.
    """

    def __init__(self, loop, db_uri, db_obj):
        self.uri = str(db_uri)
        self.db = None  # asyncpg pool, set by initialize()
        self.is_modifying = False  # True while set_number() is in flight
        self.db_obj = db_obj  # optional pre-built pool to share
        loop.run_until_complete(self.initialize())

    def wait_until_not_modifying(self):
        """Spin until no write is in progress.

        NOTE(review): busy-waits on the event-loop thread and can therefore
        never observe the flag clear; kept for backward compatibility only
        (readers yield to the loop instead, see get_current_number).
        """
        while self.is_modifying:
            pass
        return

    async def initialize(self):
        """Acquire (or reuse) the pool and ensure both tables exist."""
        if not self.db_obj:  # Reuse a shared pool when given, to avoid opening too many connections (TooManyConnectionsError).
            while True:
                try:
                    self.db = await asyncpg.create_pool(self.uri)
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        else:
            self.db = self.db_obj
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS binary_counter(
                            count BIGINT DEFAULT 0
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM binary_counter")
                    if not x:
                        # Seed the single counter row on first run.
                        await conn.execute("INSERT INTO binary_counter VALUES ($1)", 0)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS binary_counter_lb(
                            user_id BIGINT,
                            counts BIGINT DEFAULT 0,
                            recent_counts JSONB NOT NULL DEFAULT '[]'::jsonb
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        return self.db

    async def get_current_number(self) -> int:
        """Return the current count, re-initializing if the row is missing."""
        while True:
            # Yield to the event loop while a write is in progress; the old
            # synchronous spin-wait could deadlock the loop.
            while self.is_modifying:
                await asyncio.sleep(0)
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM binary_counter")
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        if x is None:
            await self.initialize()
            return await self.get_current_number()
        else:
            return x['count']

    async def set_number(self, author, msg, num: int):
        """Set the counter to ``num`` and record the count for ``author``."""
        self.is_modifying = True
        try:
            while True:
                try:
                    async with self.db.acquire() as conn:
                        await conn.execute("UPDATE binary_counter SET count = $1", int(num))
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
            while True:
                try:
                    async with self.db.acquire() as conn:
                        x = await conn.fetchrow("SELECT * FROM binary_counter_lb WHERE user_id = $1", int(author.id))
                        entry = {
                            'num': int(num),
                            'message_id': msg.id,
                            'message_url': msg.jump_url,
                            'channel_id': msg.channel.id,
                            'timestamp': msg.created_at.timestamp()
                        }
                        if x:
                            recent = json.loads(x['recent_counts']) + [entry]
                            await conn.execute(
                                "UPDATE binary_counter_lb SET counts = $2, recent_counts = $3 WHERE user_id = $1",
                                int(author.id),
                                len(recent),
                                json.dumps(recent)
                            )
                        else:
                            await conn.execute(
                                "INSERT INTO binary_counter_lb VALUES ($1, $2, $3)",
                                int(author.id),
                                1,
                                json.dumps([entry])
                            )
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        finally:
            # Always clear the flag so readers cannot stall forever.
            self.is_modifying = False
        return int(num)
class HexadecimalDatabase:
    """Postgres-backed store for the hexadecimal counting channel.

    Mirrors RomanDatabase but uses the ``hexadecimal_counter`` /
    ``hexadecimal_counter_lb`` tables and can reuse a pool via ``db_obj``.
    """

    def __init__(self, loop, db_uri, db_obj):
        self.uri = str(db_uri)
        self.db = None  # asyncpg pool, set by initialize()
        self.is_modifying = False  # True while set_number() is in flight
        self.db_obj = db_obj  # optional pre-built pool to share
        loop.run_until_complete(self.initialize())

    def wait_until_not_modifying(self):
        """Spin until no write is in progress.

        NOTE(review): busy-waits on the event-loop thread and can therefore
        never observe the flag clear; kept for backward compatibility only
        (readers yield to the loop instead, see get_current_number).
        """
        while self.is_modifying:
            pass
        return

    async def initialize(self):
        """Acquire (or reuse) the pool and ensure both tables exist."""
        if not self.db_obj:  # Reuse a shared pool when given, to avoid opening too many connections (TooManyConnectionsError).
            while True:
                try:
                    self.db = await asyncpg.create_pool(self.uri)
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        else:
            self.db = self.db_obj
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS hexadecimal_counter(
                            count BIGINT DEFAULT 0
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM hexadecimal_counter")
                    if not x:
                        # Seed the single counter row on first run.
                        await conn.execute("INSERT INTO hexadecimal_counter VALUES ($1)", 0)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS hexadecimal_counter_lb(
                            user_id BIGINT,
                            counts BIGINT DEFAULT 0,
                            recent_counts JSONB NOT NULL DEFAULT '[]'::jsonb
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        return self.db

    async def get_current_number(self) -> int:
        """Return the current count, re-initializing if the row is missing."""
        while True:
            # Yield to the event loop while a write is in progress; the old
            # synchronous spin-wait could deadlock the loop.
            while self.is_modifying:
                await asyncio.sleep(0)
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM hexadecimal_counter")
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        if x is None:
            await self.initialize()
            return await self.get_current_number()
        else:
            return x['count']

    async def set_number(self, author, msg, num: int):
        """Set the counter to ``num`` and record the count for ``author``."""
        self.is_modifying = True
        try:
            while True:
                try:
                    async with self.db.acquire() as conn:
                        await conn.execute("UPDATE hexadecimal_counter SET count = $1", int(num))
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
            while True:
                try:
                    async with self.db.acquire() as conn:
                        x = await conn.fetchrow("SELECT * FROM hexadecimal_counter_lb WHERE user_id = $1", int(author.id))
                        entry = {
                            'num': int(num),
                            'message_id': msg.id,
                            'message_url': msg.jump_url,
                            'channel_id': msg.channel.id,
                            'timestamp': msg.created_at.timestamp()
                        }
                        if x:
                            recent = json.loads(x['recent_counts']) + [entry]
                            await conn.execute(
                                "UPDATE hexadecimal_counter_lb SET counts = $2, recent_counts = $3 WHERE user_id = $1",
                                int(author.id),
                                len(recent),
                                json.dumps(recent)
                            )
                        else:
                            await conn.execute(
                                "INSERT INTO hexadecimal_counter_lb VALUES ($1, $2, $3)",
                                int(author.id),
                                1,
                                json.dumps([entry])
                            )
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        finally:
            # Always clear the flag so readers cannot stall forever.
            self.is_modifying = False
        return int(num)
class OctalDatabase:
    """Postgres-backed store for the octal counting channel.

    Mirrors RomanDatabase but uses the ``octal_counter``/``octal_counter_lb``
    tables and can reuse a pool via ``db_obj``.
    """

    def __init__(self, loop, db_uri, db_obj = None):
        self.uri = str(db_uri)
        self.db = None  # asyncpg pool, set by initialize()
        self.is_modifying = False  # True while set_number() is in flight
        self.db_obj = db_obj  # optional pre-built pool to share
        loop.run_until_complete(self.initialize())

    def wait_until_not_modifying(self):
        """Spin until no write is in progress.

        NOTE(review): busy-waits on the event-loop thread and can therefore
        never observe the flag clear; kept for backward compatibility only
        (readers yield to the loop instead, see get_current_number).
        """
        while self.is_modifying:
            pass
        return

    async def initialize(self):
        """Acquire (or reuse) the pool and ensure both tables exist."""
        if not self.db_obj:  # Reuse a shared pool when given, to avoid opening too many connections (TooManyConnectionsError).
            while True:
                try:
                    self.db = await asyncpg.create_pool(self.uri)
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        else:
            self.db = self.db_obj
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS octal_counter(
                            count BIGINT DEFAULT 0
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM octal_counter")
                    if not x:
                        # Seed the single counter row on first run.
                        await conn.execute("INSERT INTO octal_counter VALUES ($1)", 0)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        while True:
            try:
                async with self.db.acquire() as conn:
                    await conn.execute("""
                        CREATE TABLE IF NOT EXISTS octal_counter_lb(
                            user_id BIGINT,
                            counts BIGINT DEFAULT 0,
                            recent_counts JSONB NOT NULL DEFAULT '[]'::jsonb
                        );
                    """)
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        return self.db

    async def get_current_number(self) -> int:
        """Return the current count, re-initializing if the row is missing."""
        while True:
            # Yield to the event loop while a write is in progress; the old
            # synchronous spin-wait could deadlock the loop.
            while self.is_modifying:
                await asyncio.sleep(0)
            try:
                async with self.db.acquire() as conn:
                    x = await conn.fetchrow("SELECT * FROM octal_counter")
            except asyncpg.exceptions._base.InterfaceError:
                await asyncio.sleep(5)
            else:
                break
        if x is None:
            await self.initialize()
            return await self.get_current_number()
        else:
            return x['count']

    async def set_number(self, author, msg, num: int):
        """Set the counter to ``num`` and record the count for ``author``."""
        self.is_modifying = True
        try:
            while True:
                try:
                    async with self.db.acquire() as conn:
                        await conn.execute("UPDATE octal_counter SET count = $1", int(num))
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
            while True:
                try:
                    async with self.db.acquire() as conn:
                        x = await conn.fetchrow("SELECT * FROM octal_counter_lb WHERE user_id = $1", int(author.id))
                        entry = {
                            'num': int(num),
                            'message_id': msg.id,
                            'message_url': msg.jump_url,
                            'channel_id': msg.channel.id,
                            'timestamp': msg.created_at.timestamp()
                        }
                        if x:
                            recent = json.loads(x['recent_counts']) + [entry]
                            await conn.execute(
                                "UPDATE octal_counter_lb SET counts = $2, recent_counts = $3 WHERE user_id = $1",
                                int(author.id),
                                len(recent),
                                json.dumps(recent)
                            )
                        else:
                            await conn.execute(
                                "INSERT INTO octal_counter_lb VALUES ($1, $2, $3)",
                                int(author.id),
                                1,
                                json.dumps([entry])
                            )
                except asyncpg.exceptions._base.InterfaceError:
                    await asyncio.sleep(5)
                else:
                    break
        finally:
            # Always clear the flag so readers cannot stall forever.
            self.is_modifying = False
        return int(num)
class Database:
    """Facade bundling the four per-base counter databases.

    The roman database opens the real connection pool; the other three reuse
    it via the ``db_obj`` keyword so only one pool is created.
    """

    def __init__(self, *args, **kwargs):
        self.roman = RomanDatabase(*args, **kwargs)
        # Share the roman pool with the remaining databases.
        kwargs['db_obj'] = self.roman.db
        self.binary = BinaryDatabase(*args, **kwargs)
        self.hexadecimal = HexadecimalDatabase(*args, **kwargs)
        self.octal = OctalDatabase(*args, **kwargs)

    @staticmethod
    async def _measure(pool):
        # Round-trip a trivial query and return the elapsed time in ms.
        start = time.perf_counter()
        await pool.fetch("SELECT 1")
        return (time.perf_counter() - start) * 1000

    async def latency(self, *, all = False, choice = None):
        """Measure query round-trip latency in milliseconds.

        With ``all=True`` returns a dict of latencies for every database.
        Otherwise measures ``choice`` (an attribute name string or a database
        object); a missing/None choice falls back to a random database.
        ``all`` shadows the builtin but is kept for caller compatibility.
        """
        if all:
            return {
                'roman': await self._measure(self.roman.db),
                'binary': await self._measure(self.binary.db),
                'hexadecimal': await self._measure(self.hexadecimal.db),
                'octal': await self._measure(self.octal.db),
            }
        if choice is None:
            choice = random.choice([self.roman, self.binary, self.hexadecimal, self.octal])
        elif isinstance(choice, str):
            choice = getattr(self, choice, random.choice([self.roman, self.binary, self.hexadecimal, self.octal]))
        return await self._measure(choice.db)
| 34.996894
| 132
| 0.440989
| 2,070
| 22,538
| 4.671498
| 0.063768
| 0.029783
| 0.066598
| 0.07818
| 0.932575
| 0.92999
| 0.907859
| 0.903309
| 0.881593
| 0.868356
| 0
| 0.009571
| 0.480788
| 22,538
| 644
| 133
| 34.996894
| 0.816783
| 0.013666
| 0
| 0.859459
| 0
| 0
| 0.155397
| 0.004004
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016216
| false
| 0.057658
| 0.001802
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
7c582d0f4c1c0c161f1c3abb2dceaf5fec0ccfc8
| 28,892
|
py
|
Python
|
saleor/graphql/checkout/tests/test_checkout_lines.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/test_checkout_lines.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/test_checkout_lines.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
import datetime
import uuid
from unittest import mock
import graphene
from ....checkout.error_codes import CheckoutErrorCode
from ....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ....checkout.utils import calculate_checkout_quantity
from ....plugins.manager import get_plugins_manager
from ....product.models import ProductChannelListing
from ...tests.utils import get_graphql_content
from ..mutations import update_checkout_shipping_method_if_invalid
MUTATION_CHECKOUT_LINES_ADD = """
mutation checkoutLinesAdd(
$token: UUID, $lines: [CheckoutLineInput!]!) {
checkoutLinesAdd(token: $token, lines: $lines) {
checkout {
token
quantity
lines {
quantity
variant {
id
}
}
}
errors {
field
code
message
variants
}
}
}"""
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_lines_add(
    mocked_update_shipping_method, user_api_client, checkout_with_item, stock
):
    """Adding a new variant appends a line, bumps the total quantity from 3
    to 4, and triggers the shipping-method revalidation hook exactly once."""
    variant = stock.product_variant
    checkout = checkout_with_item
    line = checkout.lines.first()
    lines = fetch_checkout_lines(checkout)
    assert calculate_checkout_quantity(lines) == 3
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    assert not data["errors"]
    checkout.refresh_from_db()
    lines = fetch_checkout_lines(checkout)
    line = checkout.lines.latest("pk")
    assert line.variant == variant
    assert line.quantity == 1
    assert calculate_checkout_quantity(lines) == 4
    manager = get_plugins_manager()
    lines = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_lines_add_existing_variant(user_api_client, checkout_with_item):
    """Adding a variant already in the checkout merges quantities (3 + 7 = 10)."""
    checkout = checkout_with_item
    line = checkout.lines.first()
    variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 7}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    assert not data["errors"]
    checkout.refresh_from_db()
    line = checkout.lines.latest("pk")
    assert line.quantity == 10
def test_checkout_lines_add_existing_variant_over_allowed_stock(
    user_api_client, checkout_with_item
):
    """Merging a quantity that exceeds available stock yields INSUFFICIENT_STOCK."""
    checkout = checkout_with_item
    line = checkout.lines.first()
    variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk)
    current_stock = line.variant.stocks.first()
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": current_stock.quantity - 1}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    errors = content["data"]["checkoutLinesAdd"]["errors"]
    assert errors[0]["code"] == CheckoutErrorCode.INSUFFICIENT_STOCK.name
def test_checkout_lines_add_with_unavailable_variant(
    user_api_client, checkout_with_item, stock
):
    """A variant with no channel price is rejected with
    UNAVAILABLE_VARIANT_IN_CHANNEL on the ``lines`` field."""
    variant = stock.product_variant
    variant.channel_listings.filter(channel=checkout_with_item.channel).update(
        price_amount=None
    )
    checkout = checkout_with_item
    line = checkout.lines.first()
    assert line.quantity == 3
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    errors = content["data"]["checkoutLinesAdd"]["errors"]
    assert errors[0]["code"] == CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL.name
    assert errors[0]["field"] == "lines"
    assert errors[0]["variants"] == [variant_id]
def test_checkout_lines_add_with_insufficient_stock(
    user_api_client, checkout_with_item, stock
):
    """Requesting more units than are in stock yields INSUFFICIENT_STOCK."""
    variant = stock.product_variant
    checkout = checkout_with_item
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 49}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    errors = content["data"]["checkoutLinesAdd"]["errors"]
    assert errors[0]["code"] == CheckoutErrorCode.INSUFFICIENT_STOCK.name
    assert errors[0]["field"] == "quantity"
def test_checkout_lines_add_with_zero_quantity(
    user_api_client, checkout_with_item, stock
):
    """A quantity of 0 is rejected with ZERO_QUANTITY on ``quantity``."""
    variant = stock.product_variant
    checkout = checkout_with_item
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 0}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    errors = content["data"]["checkoutLinesAdd"]["errors"]
    assert errors[0]["code"] == CheckoutErrorCode.ZERO_QUANTITY.name
    assert errors[0]["field"] == "quantity"
def test_checkout_lines_add_no_channel_shipping_zones(
    user_api_client, checkout_with_item, stock
):
    """With no shipping zones on the channel the add fails with
    INSUFFICIENT_STOCK (stock is tied to shipping-zone warehouses)."""
    variant = stock.product_variant
    checkout = checkout_with_item
    checkout.channel.shipping_zones.clear()
    line = checkout.lines.first()
    assert line.quantity == 3
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
        "channelSlug": checkout.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    errors = data["errors"]
    assert len(errors) == 1
    assert errors[0]["code"] == CheckoutErrorCode.INSUFFICIENT_STOCK.name
    assert errors[0]["field"] == "quantity"
def test_checkout_lines_add_with_unpublished_product(
    user_api_client, checkout_with_item, stock, channel_USD
):
    """An unpublished product cannot be added: PRODUCT_NOT_PUBLISHED."""
    variant = stock.product_variant
    product = variant.product
    ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
        is_published=False
    )
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout_with_item.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
        "channelSlug": checkout_with_item.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    error = content["data"]["checkoutLinesAdd"]["errors"][0]
    assert error["code"] == CheckoutErrorCode.PRODUCT_NOT_PUBLISHED.name
def test_checkout_lines_add_with_unavailable_for_purchase_product(
    user_api_client, checkout_with_item, stock
):
    """A product with no available-for-purchase date is rejected with
    PRODUCT_UNAVAILABLE_FOR_PURCHASE on ``lines``."""
    # given
    variant = stock.product_variant
    product = stock.product_variant.product
    product.channel_listings.update(available_for_purchase=None)
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout_with_item.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
    }
    # when
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    # then
    content = get_graphql_content(response)
    error = content["data"]["checkoutLinesAdd"]["errors"][0]
    assert error["field"] == "lines"
    assert error["code"] == CheckoutErrorCode.PRODUCT_UNAVAILABLE_FOR_PURCHASE.name
    assert error["variants"] == [variant_id]
def test_checkout_lines_add_with_available_for_purchase_from_tomorrow_product(
    user_api_client, checkout_with_item, stock
):
    """A product purchasable only from tomorrow is rejected with
    PRODUCT_UNAVAILABLE_FOR_PURCHASE on ``lines``."""
    # given
    variant = stock.product_variant
    product = stock.product_variant.product
    product.channel_listings.update(
        available_for_purchase=datetime.date.today() + datetime.timedelta(days=1)
    )
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout_with_item.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
    }
    # when
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    # then
    content = get_graphql_content(response)
    error = content["data"]["checkoutLinesAdd"]["errors"][0]
    assert error["field"] == "lines"
    assert error["code"] == CheckoutErrorCode.PRODUCT_UNAVAILABLE_FOR_PURCHASE.name
    assert error["variants"] == [variant_id]
def test_checkout_lines_add_too_many(user_api_client, checkout_with_item, stock):
    """Quantities above the per-item limit of 50 produce
    QUANTITY_GREATER_THAN_LIMIT."""
    variant = stock.product_variant
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout_with_item.token,
        "lines": [{"variantId": variant_id, "quantity": 51}],
        "channelSlug": checkout_with_item.channel.slug,
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)["data"]["checkoutLinesAdd"]
    assert content["errors"]
    assert content["errors"] == [
        {
            "field": "quantity",
            "message": "Cannot add more than 50 times this item.",
            "code": "QUANTITY_GREATER_THAN_LIMIT",
            "variants": None,
        }
    ]
def test_checkout_lines_add_empty_checkout(user_api_client, checkout, stock):
    """Adding to an empty checkout creates the first line."""
    variant = stock.product_variant
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    assert not data["errors"]
    checkout.refresh_from_db()
    line = checkout.lines.first()
    assert line.variant == variant
    assert line.quantity == 1
def test_checkout_lines_add_variant_without_inventory_tracking(
    user_api_client, checkout, variant_without_inventory_tracking
):
    """Variants without inventory tracking can be added regardless of stock."""
    variant = variant_without_inventory_tracking
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    assert not data["errors"]
    checkout.refresh_from_db()
    line = checkout.lines.first()
    assert line.variant == variant
    assert line.quantity == 1
def test_checkout_lines_add_check_lines_quantity(user_api_client, checkout, stock):
    """The insufficient-stock error message names the SKU and remaining units."""
    variant = stock.product_variant
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 16}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesAdd"]
    assert data["errors"][0]["message"] == (
        "Could not add items SKU_A. Only 15 remaining in stock."
    )
    assert data["errors"][0]["field"] == "quantity"
def test_checkout_lines_invalid_variant_id(user_api_client, checkout, stock):
    """An unparsable variant global ID is rejected and reported on the variantId field."""
    valid_id = graphene.Node.to_global_id("ProductVariant", stock.product_variant.pk)
    bogus_id = "InvalidId"
    payload = {
        "token": checkout.token,
        "lines": [
            {"variantId": valid_id, "quantity": 1},
            {"variantId": bogus_id, "quantity": 3},
        ],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesAdd"]
    expected_message = (
        "Could not resolve to a node with the global id list of '%s'." % [bogus_id]
    )
    assert mutation_result["errors"][0]["message"] == expected_message
    assert mutation_result["errors"][0]["field"] == "variantId"
MUTATION_CHECKOUT_LINES_UPDATE = """
mutation checkoutLinesUpdate(
$token: UUID, $lines: [CheckoutLineInput!]!) {
checkoutLinesUpdate(token: $token, lines: $lines) {
checkout {
token
quantity
lines {
quantity
variant {
id
}
}
}
errors {
field
code
message
variants
}
}
}
"""
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_lines_update(
    mocked_update_shipping_method, user_api_client, checkout_with_item
):
    """Updating an existing line's quantity rewrites the line in place (no new
    line is created) and re-validates the shipping method exactly once."""
    checkout = checkout_with_item
    lines = fetch_checkout_lines(checkout)
    assert checkout.lines.count() == 1
    assert calculate_checkout_quantity(lines) == 3
    line = checkout.lines.first()
    variant = line.variant
    assert line.quantity == 3
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout_with_item.token,
        "lines": [{"variantId": variant_id, "quantity": 1}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesUpdate"]
    assert not data["errors"]
    checkout.refresh_from_db()
    lines = fetch_checkout_lines(checkout)
    assert checkout.lines.count() == 1
    line = checkout.lines.first()
    assert line.variant == variant
    assert line.quantity == 1
    assert calculate_checkout_quantity(lines) == 1

    # The shipping-method revalidation hook must have been invoked once with
    # the freshly fetched checkout info and lines.
    manager = get_plugins_manager()
    lines = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_lines_update_with_unavailable_variant(
    user_api_client, checkout_with_item
):
    """A variant stripped of its channel price cannot be updated: UNAVAILABLE_VARIANT_IN_CHANNEL."""
    checkout = checkout_with_item
    assert checkout.lines.count() == 1
    existing_line = checkout.lines.first()
    unavailable_variant = existing_line.variant
    # Drop the price from the channel listing to make the variant unavailable.
    unavailable_variant.channel_listings.filter(
        channel=checkout_with_item.channel
    ).update(price_amount=None)
    assert existing_line.quantity == 3
    node_id = graphene.Node.to_global_id("ProductVariant", unavailable_variant.pk)
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 1}],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    first_error = get_graphql_content(response)["data"]["checkoutLinesUpdate"]["errors"][0]
    assert first_error["code"] == CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL.name
    assert first_error["field"] == "lines"
    assert first_error["variants"] == [node_id]
def test_checkout_lines_update_channel_without_shipping_zones(
    user_api_client, checkout_with_item
):
    """With no shipping zones no stock is reachable, so the update reports insufficient stock."""
    checkout = checkout_with_item
    checkout.channel.shipping_zones.clear()
    assert checkout.lines.count() == 1
    existing_line = checkout.lines.first()
    target_variant = existing_line.variant
    assert existing_line.quantity == 3
    node_id = graphene.Node.to_global_id("ProductVariant", target_variant.pk)
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 1}],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    errors = get_graphql_content(response)["data"]["checkoutLinesUpdate"]["errors"]
    assert len(errors) == 1
    assert errors[0]["code"] == CheckoutErrorCode.INSUFFICIENT_STOCK.name
    assert errors[0]["field"] == "quantity"
def test_checkout_lines_update_variant_quantity_over_avability_stock(
    user_api_client, checkout_with_item
):
    """Lowering a line's quantity below the available stock succeeds and is echoed back."""
    checkout = checkout_with_item
    existing_line = checkout.lines.first()
    node_id = graphene.Node.to_global_id("ProductVariant", existing_line.variant.pk)
    stock_record = existing_line.variant.stocks.first()
    existing_line.quantity = stock_record.quantity - 1
    existing_line.save()
    requested_quantity = stock_record.quantity - 2
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": requested_quantity}],
        "channelSlug": checkout.channel.slug,
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesUpdate"]
    assert mutation_result["checkout"]["lines"][0]["quantity"] == requested_quantity
def test_checkout_lines_delete_with_by_zero_quantity_when_variant_out_of_stock(
    user_api_client, checkout_with_item
):
    """Setting quantity to zero removes the line even when the variant has no stock left."""
    checkout = checkout_with_item
    existing_line = checkout.lines.first()
    node_id = graphene.Node.to_global_id("ProductVariant", existing_line.variant.pk)
    stock_record = existing_line.variant.stocks.first()
    stock_record.quantity = 0
    stock_record.save(update_fields=["quantity"])
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 0}],
        "channelSlug": checkout.channel.slug,
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesUpdate"]
    assert not mutation_result["checkout"]["lines"]
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_line_delete_by_zero_quantity(
    mocked_update_shipping_method, user_api_client, checkout_with_item
):
    """Sending quantity=0 for an existing line removes that line entirely and
    triggers a single shipping-method revalidation."""
    checkout = checkout_with_item
    assert checkout.lines.count() == 1
    line = checkout.lines.first()
    variant = line.variant
    assert line.quantity == 3
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 0}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesUpdate"]
    assert not data["errors"]
    checkout.refresh_from_db()
    assert checkout.lines.count() == 0

    # The revalidation hook must run once with the (now empty) checkout state.
    manager = get_plugins_manager()
    lines = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_line_update_by_zero_quantity_dont_create_new_lines(
    mocked_update_shipping_method,
    user_api_client,
    checkout_with_item,
):
    """quantity=0 for a variant that has no line must not create a new line."""
    checkout = checkout_with_item
    line = checkout.lines.first()
    variant = line.variant
    # Remove every line first so the mutation targets a variant with no line.
    checkout.lines.all().delete()
    assert checkout.lines.count() == 0
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {
        "token": checkout.token,
        "lines": [{"variantId": variant_id, "quantity": 0}],
    }
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLinesUpdate"]
    assert not data["errors"]
    checkout.refresh_from_db()
    assert checkout.lines.count() == 0

    # Even a no-op update must trigger exactly one shipping revalidation.
    manager = get_plugins_manager()
    lines = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_lines_update_with_unpublished_product(
    user_api_client, checkout_with_item, channel_USD
):
    """Updating a line of a product unpublished in the channel fails with PRODUCT_NOT_PUBLISHED."""
    checkout = checkout_with_item
    existing_line = checkout.lines.first()
    target_variant = existing_line.variant
    # Unpublish the product in the checkout's channel.
    ProductChannelListing.objects.filter(
        product=target_variant.product, channel=channel_USD
    ).update(is_published=False)
    node_id = graphene.Node.to_global_id("ProductVariant", target_variant.pk)
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 1}],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    first_error = get_graphql_content(response)["data"]["checkoutLinesUpdate"]["errors"][0]
    assert first_error["code"] == CheckoutErrorCode.PRODUCT_NOT_PUBLISHED.name
def test_checkout_lines_update_invalid_checkout_id(user_api_client):
    """An unknown checkout token is reported as an error on the token field."""
    payload = {"token": uuid.uuid4(), "lines": []}

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesUpdate"]
    assert mutation_result["errors"][0]["field"] == "token"
def test_checkout_lines_update_check_lines_quantity(
    user_api_client, checkout_with_item
):
    """Raising a line's quantity above available stock yields an error on quantity."""
    checkout = checkout_with_item
    target_variant = checkout.lines.first().variant
    node_id = graphene.Node.to_global_id("ProductVariant", target_variant.pk)
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 11}],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesUpdate"]
    first_error = mutation_result["errors"][0]
    assert first_error["message"] == (
        "Could not add items 123. Only 10 remaining in stock."
    )
    assert first_error["field"] == "quantity"
def test_checkout_lines_update_with_chosen_shipping(
    user_api_client, checkout, stock, address, shipping_method
):
    """Lines can still be updated on a checkout that already has shipping chosen."""
    checkout.shipping_address = address
    checkout.shipping_method = shipping_method
    checkout.save()
    node_id = graphene.Node.to_global_id("ProductVariant", stock.product_variant.pk)
    payload = {
        "token": checkout.token,
        "lines": [{"variantId": node_id, "quantity": 1}],
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, payload)

    mutation_result = get_graphql_content(response)["data"]["checkoutLinesUpdate"]
    assert not mutation_result["errors"]
    checkout.refresh_from_db()
    refreshed_lines = fetch_checkout_lines(checkout)
    assert calculate_checkout_quantity(refreshed_lines) == 1
MUTATION_CHECKOUT_LINE_DELETE = """
mutation checkoutLineDelete($token: UUID, $lineId: ID!) {
checkoutLineDelete(token: $token, lineId: $lineId) {
checkout {
token
lines {
quantity
variant {
id
}
}
}
errors {
field
message
}
}
}
"""
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_line_delete(
    mocked_update_shipping_method, user_api_client, checkout_with_item
):
    """Deleting the checkout's only line empties the checkout and triggers a
    single shipping-method revalidation."""
    checkout = checkout_with_item
    lines = fetch_checkout_lines(checkout)
    assert calculate_checkout_quantity(lines) == 3
    assert checkout.lines.count() == 1
    line = checkout.lines.first()
    assert line.quantity == 3
    line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
    variables = {"token": checkout.token, "lineId": line_id}
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINE_DELETE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLineDelete"]
    assert not data["errors"]
    checkout.refresh_from_db()
    lines = fetch_checkout_lines(checkout)
    assert checkout.lines.count() == 0
    assert calculate_checkout_quantity(lines) == 0

    # Revalidation hook must run once with the refreshed (empty) state.
    manager = get_plugins_manager()
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
MUTATION_CHECKOUT_LINES_DELETE = """
mutation checkoutLinesDelete($token: UUID!, $linesIds: [ID]!) {
checkoutLinesDelete(token: $token, linesIds: $linesIds) {
checkout {
token
lines {
id
quantity
variant {
id
}
}
}
errors {
message
code
field
lines
}
}
}
"""
@mock.patch(
    "saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_lines_delete(
    mocked_update_shipping_method, user_api_client, checkout_with_items
):
    """Deleting two lines removes exactly those lines from the checkout and
    triggers a single shipping-method revalidation with the refreshed state."""
    checkout = checkout_with_items
    checkout_lines_count = checkout.lines.count()
    line = checkout.lines.first()
    second_line = checkout.lines.last()
    first_line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
    second_line_id = graphene.Node.to_global_id("CheckoutLine", second_line.pk)
    lines_list = [first_line_id, second_line_id]

    variables = {"token": checkout.token, "linesIds": lines_list}
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, variables)
    content = get_graphql_content(response)

    data = content["data"]["checkoutLinesDelete"]
    assert not data["errors"]
    checkout.refresh_from_db()
    lines = fetch_checkout_lines(checkout)
    assert checkout.lines.count() + len(lines_list) == checkout_lines_count
    remaining_lines = data["checkout"]["lines"]
    lines_ids = [line["id"] for line in remaining_lines]
    # Bug fix: the original asserted `lines_list not in lines_ids`, which
    # compares the whole *list* against individual ID strings and is therefore
    # always true. Verify each deleted ID individually instead.
    for deleted_line_id in lines_list:
        assert deleted_line_id not in lines_ids

    manager = get_plugins_manager()
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_lines_delete_invalid_checkout_token(
    user_api_client, checkout_with_items
):
    """Deleting lines with a non-existent checkout token fails with NOT_FOUND."""
    checkout = checkout_with_items
    requested_ids = [
        graphene.Node.to_global_id("CheckoutLine", checkout.lines.first().pk),
        graphene.Node.to_global_id("CheckoutLine", checkout.lines.last().pk),
    ]
    payload = {
        "token": "bd159cc8-9dd6-4529-a6f6-8a5dee169806",
        "linesIds": requested_ids,
    }

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, payload)

    first_error = get_graphql_content(response)["data"]["checkoutLinesDelete"]["errors"][0]
    assert first_error["code"] == CheckoutErrorCode.NOT_FOUND.name
def tests_checkout_lines_delete_invalid_lines_ids(user_api_client, checkout_with_items):
    """A malformed line global ID is rejected with INVALID and echoed in `lines`."""
    checkout = checkout_with_items
    valid_id = graphene.Node.to_global_id("CheckoutLine", checkout.lines.first().pk)
    requested_ids = [valid_id, "Q2hlY2tvdXRMaW5lOjE8"]
    payload = {"token": checkout.token, "linesIds": requested_ids}

    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, payload)

    first_error = get_graphql_content(response)["data"]["checkoutLinesDelete"]["errors"][0]
    assert first_error["code"] == CheckoutErrorCode.INVALID.name
    assert first_error["lines"] == requested_ids[1:]
    assert first_error["field"] == "lineId"
| 35.063107
| 88
| 0.696317
| 3,255
| 28,892
| 5.851613
| 0.056836
| 0.082585
| 0.040951
| 0.026041
| 0.879509
| 0.857405
| 0.851473
| 0.84302
| 0.833832
| 0.817767
| 0
| 0.005594
| 0.19559
| 28,892
| 823
| 89
| 35.105711
| 0.813949
| 0.001073
| 0
| 0.717201
| 0
| 0
| 0.182228
| 0.024815
| 0
| 0
| 0
| 0
| 0.132653
| 1
| 0.043732
| false
| 0
| 0.016035
| 0
| 0.059767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c90eb25c2c8db8b281a5723f1554729c418814d
| 423
|
bzl
|
Python
|
cc_module/defs.bzl
|
KennyS/rules_cc_module
|
676a4786f00b67f5026266827f13d094694ad44d
|
[
"Apache-2.0"
] | null | null | null |
cc_module/defs.bzl
|
KennyS/rules_cc_module
|
676a4786f00b67f5026266827f13d094694ad44d
|
[
"Apache-2.0"
] | null | null | null |
cc_module/defs.bzl
|
KennyS/rules_cc_module
|
676a4786f00b67f5026266827f13d094694ad44d
|
[
"Apache-2.0"
] | null | null | null |
# Public API of rules_cc_module: load the private rule implementations and
# re-export them under their stable, documented names.
load("//cc_module/private:cc_module.bzl",
    _cc_module = "cc_module",
    _cc_header_module = "cc_header_module",
    _cc_module_binary = "cc_module_binary",
    _cc_module_library = "cc_module_library",
    _cc_module_test = "cc_module_test",
)

# Stable aliases — consumers should load these, not the private symbols.
cc_module = _cc_module
cc_header_module = _cc_header_module
cc_module_binary = _cc_module_binary
cc_module_library = _cc_module_library
cc_module_test = _cc_module_test
| 30.214286
| 46
| 0.780142
| 63
| 423
| 4.47619
| 0.142857
| 0.510638
| 0.141844
| 0.283688
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0
| 0
| 0.137116
| 423
| 13
| 47
| 32.538462
| 0.772603
| 0
| 0
| 0
| 0
| 0
| 0.248227
| 0.078014
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7cda254872582e1ae2c5413950517a59901da552
| 482
|
py
|
Python
|
python-bindings/rust_tokenizers/__init__.py
|
mfelsche/rust-tokenizers
|
5a7860daf35c24f6b9ac8022c10f5883a29f0993
|
[
"Apache-2.0"
] | null | null | null |
python-bindings/rust_tokenizers/__init__.py
|
mfelsche/rust-tokenizers
|
5a7860daf35c24f6b9ac8022c10f5883a29f0993
|
[
"Apache-2.0"
] | null | null | null |
python-bindings/rust_tokenizers/__init__.py
|
mfelsche/rust-tokenizers
|
5a7860daf35c24f6b9ac8022c10f5883a29f0993
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the native (Rust extension) tokenizer classes at package level so
# callers can import them directly from `rust_tokenizers`.
from .rust_tokenizers import PyBertTokenizer, PyCtrlTokenizer, PyGpt2Tokenizer, PyRobertaTokenizer, \
    PyOpenAiGptTokenizer, PySentencePieceTokenizer, PyAlbertTokenizer, PyT5Tokenizer, PyXLMRobertaTokenizer, \
    PyXLNetTokenizer

# Explicit public API of the package.
__all__ = ["PyBertTokenizer", "PyCtrlTokenizer", "PyGpt2Tokenizer", "PyRobertaTokenizer",
           "PyOpenAiGptTokenizer", "PySentencePieceTokenizer", "PyAlbertTokenizer", "PyT5Tokenizer",
           "PyXLMRobertaTokenizer", "PyXLNetTokenizer"]
| 60.25
| 110
| 0.788382
| 25
| 482
| 15
| 0.6
| 0.16
| 0.24
| 0.336
| 0.928
| 0.928
| 0.928
| 0.928
| 0.928
| 0.928
| 0
| 0.009434
| 0.120332
| 482
| 7
| 111
| 68.857143
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.360996
| 0.093361
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
6b3cc14abb35adbf9db83a897d561fa8b5dd0648
| 5,715
|
py
|
Python
|
healthcare/backends/djhealth/migrations/0001_initial.py
|
caktus/rapidsms-healthcare
|
0effdb2036129702c15530510633561d0c43d6d4
|
[
"BSD-3-Clause"
] | 9
|
2015-08-31T09:22:28.000Z
|
2019-04-27T04:06:00.000Z
|
healthcare/backends/djhealth/migrations/0001_initial.py
|
caktus/rapidsms-healthcare
|
0effdb2036129702c15530510633561d0c43d6d4
|
[
"BSD-3-Clause"
] | null | null | null |
healthcare/backends/djhealth/migrations/0001_initial.py
|
caktus/rapidsms-healthcare
|
0effdb2036129702c15530510633561d0c43d6d4
|
[
"BSD-3-Clause"
] | 7
|
2015-09-17T00:56:39.000Z
|
2020-03-14T11:08:17.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the djhealth Patient, Provider and
    PatientID tables (auto-generated style — edit with care)."""

    def forwards(self, orm):
        """Apply the migration: create the three djhealth tables."""
        # Adding model 'Patient'
        db.create_table('djhealth_patient', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default=u'A', max_length=1)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('sex', self.gf('django.db.models.fields.CharField')(default=u'', max_length=1, blank=True)),
            ('birth_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('death_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('location', self.gf('django.db.models.fields.CharField')(default=u'', max_length=512, blank=True)),
        ))
        db.send_create_signal('djhealth', ['Patient'])

        # Adding model 'Provider'
        db.create_table('djhealth_provider', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default=u'A', max_length=1)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('location', self.gf('django.db.models.fields.CharField')(default=u'', max_length=512, blank=True)),
        ))
        db.send_create_signal('djhealth', ['Provider'])

        # Adding model 'PatientID'
        db.create_table('djhealth_patientid', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default=u'A', max_length=1)),
            ('uid', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('patient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djhealth.Patient'])),
            ('source', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal('djhealth', ['PatientID'])

    def backwards(self, orm):
        """Revert the migration: drop the three djhealth tables."""
        # Deleting model 'Patient'
        db.delete_table('djhealth_patient')

        # Deleting model 'Provider'
        db.delete_table('djhealth_provider')

        # Deleting model 'PatientID'
        db.delete_table('djhealth_patientid')

    # Frozen ORM state South uses to reconstruct model definitions at
    # migration time; must mirror the schema created in forwards().
    models = {
        'djhealth.patient': {
            'Meta': {'object_name': 'Patient'},
            'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'death_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '512', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sex': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'A'", 'max_length': '1'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'djhealth.patientid': {
            'Meta': {'object_name': 'PatientID'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djhealth.Patient']"}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'A'", 'max_length': '1'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'djhealth.provider': {
            'Meta': {'object_name': 'Provider'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '512', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'A'", 'max_length': '1'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['djhealth']
| 60.797872
| 124
| 0.592651
| 655
| 5,715
| 5.045802
| 0.109924
| 0.108926
| 0.186384
| 0.266263
| 0.809077
| 0.798185
| 0.796369
| 0.796369
| 0.770651
| 0.755522
| 0
| 0.009937
| 0.190026
| 5,715
| 94
| 125
| 60.797872
| 0.70404
| 0.029921
| 0
| 0.466667
| 0
| 0
| 0.467221
| 0.27831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.053333
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8608b52a3fd2a0521bcfe5357b6b4ca1309bf847
| 215
|
py
|
Python
|
ovl/helpers/function_description.py
|
ofekashery/Ovl-Python
|
117e3f4ae1a8a5624c41792bd36b760afbe86c8e
|
[
"Apache-2.0"
] | 1
|
2020-10-11T16:14:46.000Z
|
2020-10-11T16:14:46.000Z
|
ovl/utils/function_description.py
|
frc1937/ovl
|
1954edf0ab946dbb42d90eba1dac97eeb157c567
|
[
"Apache-2.0"
] | 1
|
2020-10-18T05:00:06.000Z
|
2020-12-24T20:03:44.000Z
|
ovl/utils/function_description.py
|
frc1937/ovl
|
1954edf0ab946dbb42d90eba1dac97eeb157c567
|
[
"Apache-2.0"
] | null | null | null |
def function_description(function):
    """Return a human-readable summary (name and docstring) of *function*."""
    template = "Function Name: {function_name}\ndocumentation:\n{documentation}"
    return template.format(
        function_name=function.__name__,
        documentation=function.__doc__,
    )
| 43
| 85
| 0.711628
| 21
| 215
| 6.761905
| 0.428571
| 0.338028
| 0.28169
| 0.338028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172093
| 215
| 4
| 86
| 53.75
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0.293023
| 0.144186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
8608d063ca17e5c50db162aa2377b3972cfeb078
| 182,885
|
py
|
Python
|
tests/hwsim/test_dpp.py
|
cargoudel/hostap-15118-8
|
717f236dce3ed571e9a61fcce86d8a37a7f7bc1b
|
[
"Unlicense"
] | null | null | null |
tests/hwsim/test_dpp.py
|
cargoudel/hostap-15118-8
|
717f236dce3ed571e9a61fcce86d8a37a7f7bc1b
|
[
"Unlicense"
] | null | null | null |
tests/hwsim/test_dpp.py
|
cargoudel/hostap-15118-8
|
717f236dce3ed571e9a61fcce86d8a37a7f7bc1b
|
[
"Unlicense"
] | null | null | null |
# Test cases for Device Provisioning Protocol (DPP)
# Copyright (c) 2017, Qualcomm Atheros, Inc.
# Copyright (c) 2018, The Linux Foundation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import subprocess
import time
import hostapd
import hwsim_utils
from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger
from wpasupplicant import WpaSupplicant
def check_dpp_capab(dev, brainpool=False):
    """Skip the test unless *dev* supports DPP (and, optionally, Brainpool curves)."""
    if "UNKNOWN COMMAND" in dev.request("DPP_BOOTSTRAP_GET_URI 0"):
        raise HwsimSkip("DPP not supported")
    if not brainpool:
        return
    tls = dev.request("GET tls_library")
    # Brainpool support requires a real OpenSSL build (not BoringSSL shim).
    if not tls.startswith("OpenSSL") or "run=BoringSSL" in tls:
        raise HwsimSkip("Crypto library does not support Brainpool curves: " + tls)
def test_dpp_qr_code_parsing(dev, apdev):
    """DPP QR Code parsing"""
    check_dpp_capab(dev[0])
    id = []
    # Valid bootstrapping URIs: channel list + key; serial + MAC + key; MAC + key.
    tests = [ "DPP:C:81/1,115/36;K:MDkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDIgADM2206avxHJaHXgLMkq/24e0rsrfMP9K1Tm8gx+ovP0I=;;",
              "DPP:I:SN=4774LH2b4044;M:010203040506;K:MDkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDIgADURzxmttZoIRIPWGoQMV00XHWCAQIhXruVWOz0NjlkIA=;;",
              "DPP:I:;M:010203040506;K:MDkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDIgADURzxmttZoIRIPWGoQMV00XHWCAQIhXruVWOz0NjlkIA=;;" ]
    for uri in tests:
        res = dev[0].request("DPP_QR_CODE " + uri)
        if "FAIL" in res:
            raise Exception("Failed to parse QR Code")
        id.append(int(res))
        # Each parsed URI must round-trip byte-for-byte.
        uri2 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id[-1])
        if uri != uri2:
            raise Exception("Returned URI does not match")

    # Malformed URIs must all be rejected.
    tests = [ "foo",
              "DPP:",
              "DPP:;;",
              "DPP:C:1/2;M:;K;;",
              "DPP:I:;M:01020304050;K:MDkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDIgADURzxmttZoIRIPWGoQMV00XHWCAQIhXruVWOz0NjlkIA=;;" ]
    for t in tests:
        res = dev[0].request("DPP_QR_CODE " + t)
        if "FAIL" not in res:
            raise Exception("Accepted invalid QR Code: " + t)
    logger.info("ID: " + str(id))
    # Bootstrap IDs must be unique per parsed entry.
    if id[0] == id[1] or id[0] == id[2] or id[1] == id[2]:
        raise Exception("Duplicate ID returned")

    if "FAIL" not in dev[0].request("DPP_BOOTSTRAP_REMOVE 12345678"):
        raise Exception("DPP_BOOTSTRAP_REMOVE accepted unexpectedly")
    if "OK" not in dev[0].request("DPP_BOOTSTRAP_REMOVE %d" % id[1]):
        raise Exception("DPP_BOOTSTRAP_REMOVE failed")

    # Locally generated bootstrapping info must parse back through DPP_QR_CODE.
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % int(res))
    logger.info("Generated URI: " + uri)
    res = dev[0].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse self-generated QR Code URI")

    # Same round-trip with explicit channel list, MAC and info fields.
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1,115/36 mac=010203040506 info=foo")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % int(res))
    logger.info("Generated URI: " + uri)
    res = dev[0].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse self-generated QR Code URI")
# Fixed DER-encoded EC private keys (hex) used as deterministic DPP
# bootstrapping keys in the tests below: NIST P-256/P-384/P-521 and
# Brainpool P-256r1/P-384r1/P-512r1 variants.
dpp_key_p256 = "30570201010420777fc55dc51e967c10ec051b91d860b5f1e6c934e48d5daffef98d032c64b170a00a06082a8648ce3d030107a124032200020c804188c7f85beb6e91070d2b3e5e39b90ca77b4d3c5251bc1844d6ca29dcad"
dpp_key_p384 = "307402010104302f56fdd83b5345cacb630eb7c22fa5ad5daba37307c95191e2a75756d137003bd8b32dbcb00eb5650c1eb499ecfcaec0a00706052b81040022a13403320003615ec2141b5b77aebb6523f8a012755f9a34405a8398d2ceeeebca7f5ce868bf55056cba4c4ec62fad3ed26dd29e0f23"
dpp_key_p521 = "308198020101044200c8010d5357204c252551aaf4e210343111e503fd1dc615b257058997c49b6b643c975226e93be8181cca3d83a7072defd161dfbdf433c19abe1f2ad51867a05761a00706052b81040023a1460344000301cdf3608b1305fe34a1f976095dcf001182b9973354efe156291a66830292f9babd8f412ad462958663e7a75d1d0610abdfc3dd95d40669f7ab3bc001668cfb3b7c"
dpp_key_bp256 = "3058020101042057133a676fb60bf2a3e6797e19833c7b0f89dc192ab99ab5fa377ae23a157765a00b06092b2403030208010107a12403220002945d9bf7ce30c9c1ac0ff21ca62b984d5bb80ff69d2be8c9716ab39a10d2caf0"
dpp_key_bp384 = "307802010104304902df9f3033a9b7128554c0851dc7127c3573eed150671dae74c0013e9896a9b1c22b6f7d43d8a2ebb7cd474dc55039a00b06092b240303020801010ba13403320003623cb5e68787f351faababf3425161571560add2e6f9a306fcbffb507735bf955bb46dd20ba246b0d5cadce73e5bd6a6"
dpp_key_bp512 = "30819802010104405803494226eb7e50bf0e90633f37e7e35d33f5fa502165eeba721d927f9f846caf12e925701d18e123abaaaf4a7edb4fc4de21ce18bc10c4d12e8b3439f74e40a00b06092b240303020801010da144034200033b086ccd47486522d35dc16fbb2229642c2e9e87897d45abbf21f9fb52acb5a6272b31d1b227c3e53720769cc16b4cb181b26cd0d35fe463218aaedf3b6ec00a"
def test_dpp_qr_code_curves(dev, apdev):
    """DPP QR Code and supported curves"""
    check_dpp_capab(dev[0])
    curve_keys = [("prime256v1", dpp_key_p256),
                  ("secp384r1", dpp_key_p384),
                  ("secp521r1", dpp_key_p521)]
    for curve_name, key_hex in curve_keys:
        bootstrap_id = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode key=" + key_hex)
        if "FAIL" in bootstrap_id:
            raise Exception("Failed to set key for " + curve_name)
        bootstrap_info = dev[0].request("DPP_BOOTSTRAP_INFO " + bootstrap_id)
        if "FAIL" in bootstrap_info:
            raise Exception("Failed to get info for " + curve_name)
        # The reported curve must match the key that was provided.
        if "curve=" + curve_name not in bootstrap_info:
            raise Exception("Curve mismatch for " + curve_name)
def test_dpp_qr_code_curves_brainpool(dev, apdev):
    """DPP QR Code and supported Brainpool curves"""
    check_dpp_capab(dev[0], brainpool=True)
    curve_keys = [("brainpoolP256r1", dpp_key_bp256),
                  ("brainpoolP384r1", dpp_key_bp384),
                  ("brainpoolP512r1", dpp_key_bp512)]
    for curve_name, key_hex in curve_keys:
        bootstrap_id = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode key=" + key_hex)
        if "FAIL" in bootstrap_id:
            raise Exception("Failed to set key for " + curve_name)
        bootstrap_info = dev[0].request("DPP_BOOTSTRAP_INFO " + bootstrap_id)
        if "FAIL" in bootstrap_info:
            raise Exception("Failed to get info for " + curve_name)
        # The reported curve must match the key that was provided.
        if "curve=" + curve_name not in bootstrap_info:
            raise Exception("Curve mismatch for " + curve_name)
def test_dpp_qr_code_curve_select(dev, apdev):
    """DPP QR Code and curve selection"""
    check_dpp_capab(dev[0], brainpool=True)
    check_dpp_capab(dev[1], brainpool=True)
    addr = dev[0].own_addr().replace(':', '')
    # Build (curve, uri) bootstrapping entries on dev0 for every supported curve.
    bi = []
    for key in [ dpp_key_p256, dpp_key_p384, dpp_key_p521,
                 dpp_key_bp256, dpp_key_bp384, dpp_key_bp512 ]:
        id = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr + " key=" + key)
        if "FAIL" in id:
            # Bug fix: the original raised with `curve`, which is only assigned
            # while parsing DPP_BOOTSTRAP_INFO below, so a failure here would
            # surface as a NameError instead of the intended message.
            raise Exception("Failed to set key " + key)
        info = dev[0].request("DPP_BOOTSTRAP_INFO " + id)
        # Initialize so a missing curve= line cannot leak the previous
        # iteration's value.
        curve = None
        for i in info.splitlines():
            if '=' in i:
                name, val = i.split('=')
                if name == "curve":
                    curve = val
                    break
        uri = dev[0].request("DPP_BOOTSTRAP_GET_URI " + id)
        bi.append((curve, uri))

    # Run a full authentication + configuration attempt per curve; dev0 is the
    # Responder/Enrollee, dev1 the Initiator/Configurator.
    for curve, uri in bi:
        logger.info("Curve: " + curve)
        logger.info("URI: " + uri)

        if "OK" not in dev[0].request("DPP_LISTEN 2412"):
            raise Exception("Failed to start listen operation")

        res = dev[1].request("DPP_QR_CODE " + uri)
        if "FAIL" in res:
            raise Exception("Failed to parse QR Code URI")
        if "OK" not in dev[1].request("DPP_AUTH_INIT peer=" + res):
            raise Exception("Failed to initiate DPP Authentication")
        ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
        if ev is None:
            raise Exception("DPP authentication did not succeed (Responder)")
        ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
        if ev is None:
            raise Exception("DPP authentication did not succeed (Initiator)")
        # No configurator parameters were supplied, so configuration is
        # expected to fail on the Enrollee while the Responder reports it sent.
        ev = dev[0].wait_event(["DPP-CONF-FAILED"], timeout=2)
        if ev is None:
            raise Exception("DPP configuration result not seen (Enrollee)")
        ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=2)
        if ev is None:
            raise Exception("DPP configuration result not seen (Responder)")
        dev[0].request("DPP_STOP_LISTEN")
        dev[1].request("DPP_STOP_LISTEN")
        dev[0].dump_monitor()
        dev[1].dump_monitor()
def test_dpp_qr_code_auth_broadcast(dev, apdev):
    """DPP QR Code and authentication exchange (broadcast)"""
    # No mac= parameter in DPP_BOOTSTRAP_GEN, so the Initiator sends the
    # Authentication Request to the broadcast address.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])

    logger.info("dev0 displays QR Code")
    reply = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1")
    if "FAIL" in reply:
        raise Exception("Failed to generate bootstrapping info")
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % int(reply))

    logger.info("dev1 scans QR Code")
    reply = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in reply:
        raise Exception("Failed to parse QR Code URI")
    peer_id = int(reply)

    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d" % peer_id):
        raise Exception("Failed to initiate DPP Authentication")
    if dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5) is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    if dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5) is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_unicast(dev, apdev):
    """DPP QR Code and authentication exchange (unicast)"""
    # Default run: no explicit curve selection.
    run_dpp_qr_code_auth_unicast(dev, apdev, curve=None)
def test_dpp_qr_code_auth_unicast_ap_enrollee(dev, apdev):
    """DPP QR Code and authentication exchange (AP enrollee)"""
    # Responder listens with the AP netrole instead of the default.
    run_dpp_qr_code_auth_unicast(dev, apdev, curve=None, netrole="ap")
def test_dpp_qr_code_curve_prime256v1(dev, apdev):
    """DPP QR Code and curve prime256v1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="prime256v1")
def test_dpp_qr_code_curve_secp384r1(dev, apdev):
    """DPP QR Code and curve secp384r1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="secp384r1")
def test_dpp_qr_code_curve_secp521r1(dev, apdev):
    """DPP QR Code and curve secp521r1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="secp521r1")
def test_dpp_qr_code_curve_brainpoolP256r1(dev, apdev):
    """DPP QR Code and curve brainpoolP256r1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="brainpoolP256r1")
def test_dpp_qr_code_curve_brainpoolP384r1(dev, apdev):
    """DPP QR Code and curve brainpoolP384r1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="brainpoolP384r1")
def test_dpp_qr_code_curve_brainpoolP512r1(dev, apdev):
    """DPP QR Code and curve brainpoolP512r1"""
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="brainpoolP512r1")
def test_dpp_qr_code_set_key(dev, apdev):
    """DPP QR Code and fixed bootstrapping key"""
    # Pre-generated bootstrapping key (DER, hex encoded) instead of letting
    # the implementation generate one.
    bootstrap_key = "30770201010420e5143ac74682cc6869a830e8f5301a5fa569130ac329b1d7dd6f2a7495dbcbe1a00a06082a8648ce3d030107a144034200045e13e167c33dbc7d85541e5509600aa8139bbb3e39e25898992c5d01be92039ee2850f17e71506ded0d6b25677441eae249f8e225c68dd15a6354dca54006383"
    run_dpp_qr_code_auth_unicast(dev, apdev, curve=None, key=bootstrap_key)
def run_dpp_qr_code_auth_unicast(dev, apdev, curve, netrole=None, key=None,
                                 require_conf_success=False, init_extra=None,
                                 require_conf_failure=False,
                                 configurator=False, conf_curve=None):
    """Run a DPP QR Code bootstrapped authentication exchange (unicast).

    dev[0] acts as the Responder (displays the QR Code and listens on
    2412 MHz); dev[1] acts as the Initiator (scans the QR Code and starts
    DPP Authentication).

    curve - bootstrap key curve for dev[0] (None = implementation default)
    netrole - optional netrole parameter for the Responder's DPP_LISTEN
    key - optional fixed bootstrapping key (hex) instead of a generated one
    require_conf_success - raise if the configuration step fails
    init_extra - extra parameters appended to DPP_AUTH_INIT
    require_conf_failure - raise if the configuration step succeeds
    configurator - create a Configurator on dev[1] and use it
    conf_curve - optional curve for the Configurator's keys
    """
    # Brainpool support is an optional build feature, so check capability.
    check_dpp_capab(dev[0], curve and "brainpool" in curve)
    check_dpp_capab(dev[1], curve and "brainpool" in curve)
    if configurator:
        logger.info("Create configurator on dev1")
        cmd = "DPP_CONFIGURATOR_ADD"
        if conf_curve:
            cmd += " curve=" + conf_curve
        # Dropped a stray trailing semicolon here (non-idiomatic Python).
        res = dev[1].request(cmd)
        if "FAIL" in res:
            raise Exception("Failed to add configurator")
        conf_id = int(res)
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    if curve:
        cmd += " curve=" + curve
    if key:
        cmd += " key=" + key
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    cmd = "DPP_LISTEN 2412"
    if netrole:
        cmd += " netrole=" + netrole
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d" % id1
    if init_extra:
        cmd += " " + init_extra
    if configurator:
        cmd += " configurator=%d" % conf_id
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED", "DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    if require_conf_success:
        if "DPP-CONF-FAILED" in ev:
            raise Exception("DPP configuration failed")
    if require_conf_failure:
        if "DPP-CONF-SUCCESS" in ev:
            raise Exception("DPP configuration succeeded unexpectedly")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
    dev[1].dump_monitor()
def test_dpp_qr_code_auth_mutual(dev, apdev):
    """DPP QR Code and authentication exchange (mutual)"""
    # Both devices provision bootstrapping info and scan each other's QR
    # Code before initiation, so mutual authentication can be used from the
    # start of the exchange.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri1b)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    # own= points at dev1's own bootstrapping info to allow mutual auth.
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b)):
        raise Exception("Failed to initiate DPP Authentication")
    # The Initiator must indicate that mutual authentication was used.
    ev = dev[1].wait_event(["DPP-AUTH-DIRECTION"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication direction not indicated (Initiator)")
    if "mutual=1" not in ev:
        raise Exception("Mutual authentication not used")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_mutual2(dev, apdev):
    """DPP QR Code and authentication exchange (mutual2)"""
    # Variant of the mutual test where dev0 scans the Initiator's QR Code
    # only after the exchange has started (DPP_LISTEN qr=mutual), which
    # exercises the DPP-RESPONSE-PENDING / DPP-SCAN-PEER-QR-CODE path.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev1 initiates DPP Authentication")
    # qr=mutual: Responder requires mutual authentication and waits for the
    # peer QR Code to be scanned before responding with Status=0.
    if "OK" not in dev[0].request("DPP_LISTEN 2412 qr=mutual"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        raise Exception("Pending response not reported")
    ev = dev[0].wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("QR Code scan for mutual authentication not requested")
    # Now complete the pending exchange by scanning the Initiator's URI.
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri1b)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    ev = dev[1].wait_event(["DPP-AUTH-DIRECTION"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication direction not indicated (Initiator)")
    if "mutual=1" not in ev:
        raise Exception("Mutual authentication not used")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_mutual_p_256(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen P-256)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="P-256")
def test_dpp_qr_code_auth_mutual_p_384(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen P-384)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="P-384")
def test_dpp_qr_code_auth_mutual_p_521(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen P-521)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="P-521")
def test_dpp_qr_code_auth_mutual_bp_256(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen BP-256)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="BP-256")
def test_dpp_qr_code_auth_mutual_bp_384(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen BP-384)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="BP-384")
def test_dpp_qr_code_auth_mutual_bp_512(dev, apdev):
    """DPP QR Code and authentication exchange (mutual, autogen BP-512)"""
    run_dpp_qr_code_auth_mutual(dev, apdev, curve="BP-512")
def run_dpp_qr_code_auth_mutual(dev, apdev, curve):
    """Run a mutual DPP authentication exchange for the given curve.

    The Initiator (dev1) does not set its own bootstrapping info
    explicitly; the implementation autogenerates one and delivers its URI
    through the DPP-RESPONSE-PENDING event, which the Responder (dev0)
    then scans.
    """
    # BP-* (brainpool) curves are an optional build feature.
    check_dpp_capab(dev[0], curve and "BP-" in curve)
    check_dpp_capab(dev[1], curve and "BP-" in curve)
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    cmd += " curve=" + curve
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    # qr=mutual: Responder requires the peer QR Code before completing.
    if "OK" not in dev[0].request("DPP_LISTEN 2412 qr=mutual"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d" % (id1)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        raise Exception("Pending response not reported")
    # The event carries the Initiator's autogenerated bootstrap URI.
    uri = ev.split(' ')[1]
    ev = dev[0].wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("QR Code scan for mutual authentication not requested")
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    ev = dev[1].wait_event(["DPP-AUTH-DIRECTION"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication direction not indicated (Initiator)")
    if "mutual=1" not in ev:
        raise Exception("Mutual authentication not used")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_auth_resp_retries(dev, apdev):
    """DPP Authentication Response retries"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Configure aggressive retry behavior on the Responder so that the
    # retransmission can be observed within the test timeout.
    dev[0].set("dpp_resp_max_tries", "3")
    dev[0].set("dpp_resp_retry_time", "100")
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412 qr=mutual"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        raise Exception("Pending response not reported")
    ev = dev[0].wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("QR Code scan for mutual authentication not requested")
    # Stop Initiator from listening to frames to force retransmission of the
    # DPP Authentication Response frame with Status=0
    dev[1].request("DPP_STOP_LISTEN")
    dev[1].dump_monitor()
    dev[0].dump_monitor()
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri1b)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    # type=1 indicates the DPP Authentication Response frame.
    ev = dev[0].wait_event(["DPP-TX"], timeout=5)
    if ev is None or "type=1" not in ev:
        raise Exception("DPP Authentication Response not sent")
    ev = dev[0].wait_event(["DPP-TX-STATUS"], timeout=5)
    if ev is None:
        raise Exception("TX status for DPP Authentication Response not reported")
    # Since the Initiator is no longer listening, the frame must not be
    # ACKed, which in turn must trigger a retransmission.
    if "result=no-ACK" not in ev:
        raise Exception("Unexpected TX status for Authentication Response: " + ev)
    ev = dev[0].wait_event(["DPP-TX"], timeout=15)
    if ev is None or "type=1" not in ev:
        raise Exception("DPP Authentication Response retransmission not sent")
def test_dpp_qr_code_auth_mutual_not_used(dev, apdev):
    """DPP QR Code and authentication exchange (mutual not used)"""
    # Both devices generate bootstrapping info, but dev0 never scans dev1's
    # QR Code, so the exchange must fall back to one-way authentication
    # (mutual=0 in the DPP-AUTH-DIRECTION event).
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev0 does not scan QR Code")
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-AUTH-DIRECTION"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication direction not indicated (Initiator)")
    if "mutual=0" not in ev:
        # Bug fix: the previous message ("Mutual authentication not used")
        # stated the opposite of the actual failure - reaching this branch
        # means mutual authentication WAS used even though it should not be.
        raise Exception("Mutual authentication unexpectedly used")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_mutual_curve_mismatch(dev, apdev):
    """DPP QR Code and authentication exchange (mutual/mismatch)"""
    # dev1's own bootstrapping key uses a different curve (secp384r1) than
    # the peer's default, so DPP_AUTH_INIT must reject the combination
    # locally.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr + " curve=secp384r1")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri1b)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    # Curve mismatch between peer= and own= info must fail immediately.
    res = dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b))
    if "FAIL" not in res:
        raise Exception("DPP_AUTH_INIT accepted unexpectedly")
def test_dpp_qr_code_auth_hostapd_mutual2(dev, apdev):
    """DPP QR Code and authentication exchange (hostapd mutual2)"""
    # Same flow as the mutual2 station test, but with hostapd acting as the
    # Responder that requires mutual authentication (qr=mutual) and scans
    # the Initiator's QR Code only after the exchange has started.
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
    check_dpp_capab(hapd)
    logger.info("AP displays QR Code")
    addr = hapd.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id_h = int(res)
    uri_h = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id_h)
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri_h)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0 = int(res)
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0b = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0b)
    logger.info("dev0 initiates DPP Authentication")
    if "OK" not in hapd.request("DPP_LISTEN 2412 qr=mutual"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[0].request("DPP_AUTH_INIT peer=%d own=%d" % (id0, id0b)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        raise Exception("Pending response not reported")
    ev = hapd.wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("QR Code scan for mutual authentication not requested")
    # The AP scans the station's QR Code to complete mutual authentication.
    logger.info("AP scans QR Code")
    res = hapd.request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    hapd.request("DPP_STOP_LISTEN")
def test_dpp_qr_code_listen_continue(dev, apdev):
    """DPP QR Code and listen operation needing continuation"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    logger.info("Wait for listen to expire and get restarted")
    # Sleep presumably exceeds the driver's remain-on-channel period so the
    # listen operation has to be internally restarted before the exchange
    # starts -- TODO confirm the exact period against the implementation.
    time.sleep(5.5)
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d" % id1):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_initiator_enrollee(dev, apdev):
    """DPP QR Code and authentication exchange (Initiator in Enrollee role)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    dev[0].request("SET gas_address3 1")
    dev[1].request("SET gas_address3 1")
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    # role=enrollee: the Initiator takes the Enrollee role, so the
    # Responder (dev0) ends up acting as the Configurator.
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d role=enrollee" % id1):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration did not succeed (Configurator)")
    ev = dev[1].wait_event(["DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration did not succeed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_initiator_either_1(dev, apdev):
    """DPP QR Code and authentication exchange (Initiator in either role)"""
    run_dpp_qr_code_auth_initiator_either(dev, apdev, resp_role=None,
                                          conf_dev=dev[1],
                                          enrollee_dev=dev[0])
def test_dpp_qr_code_auth_initiator_either_2(dev, apdev):
    """DPP QR Code and authentication exchange (Initiator in either role)"""
    run_dpp_qr_code_auth_initiator_either(dev, apdev, resp_role="enrollee",
                                          conf_dev=dev[1],
                                          enrollee_dev=dev[0])
def test_dpp_qr_code_auth_initiator_either_3(dev, apdev):
    """DPP QR Code and authentication exchange (Initiator in either role)"""
    run_dpp_qr_code_auth_initiator_either(dev, apdev, resp_role="configurator",
                                          conf_dev=dev[0],
                                          enrollee_dev=dev[1])
def run_dpp_qr_code_auth_initiator_either(dev, apdev, resp_role,
                                          conf_dev, enrollee_dev):
    """Run DPP authentication with the Initiator using role=either.

    resp_role - optional role= value for the Responder's DPP_LISTEN
    conf_dev - device expected to end up in the Configurator role
    enrollee_dev - device expected to end up in the Enrollee role
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    cmd = "DPP_LISTEN 2412"
    if resp_role:
        cmd += " role=" + resp_role
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # role=either: the Initiator accepts whichever role the Responder
    # does not take.
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d role=either" % id1):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = conf_dev.wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration did not succeed (Configurator)")
    ev = enrollee_dev.wait_event(["DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration did not succeed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_incompatible_roles(dev, apdev):
    """DPP QR Code and authentication exchange (incompatible roles)"""
    # First attempt: both ends request the enrollee role, which must fail
    # with DPP-NOT-COMPATIBLE on both sides. A second attempt with the
    # Initiator as configurator must then succeed.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    if "OK" not in dev[0].request("DPP_LISTEN 2412 role=enrollee"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d role=enrollee" % id1):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-NOT-COMPATIBLE"], timeout=5)
    if ev is None:
        raise Exception("DPP-NOT-COMPATIBLE event on initiator timed out")
    ev = dev[0].wait_event(["DPP-NOT-COMPATIBLE"], timeout=1)
    if ev is None:
        raise Exception("DPP-NOT-COMPATIBLE event on responder timed out")
    # Retry with a compatible role combination.
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d role=configurator" % id1):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    dev[0].request("DPP_STOP_LISTEN")
def test_dpp_qr_code_auth_neg_chan(dev, apdev):
    """DPP QR Code and authentication exchange with requested different channel"""
    # The Initiator requests neg_freq=2462, so the Authentication Request
    # goes out on 2412 (bootstrap channel) while the Response/Confirm and
    # the following configuration exchange move to 2462. Each frame's TX,
    # RX, and TX status are verified for the expected frequency and type.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("Create configurator on dev1")
    cmd = "DPP_CONFIGURATOR_ADD"
    # Dropped a stray trailing semicolon here (non-idiomatic Python).
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d configurator=%d conf=sta-dpp neg_freq=2462" % (id1, conf_id)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    # Authentication Request (type=0) on the bootstrap channel 2412.
    ev = dev[1].wait_event(["DPP-TX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Request not sent")
    if "freq=2412 type=0" not in ev:
        raise Exception("Unexpected TX data for Authentication Request: " + ev)
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Request not received")
    if "freq=2412 type=0" not in ev:
        raise Exception("Unexpected RX data for Authentication Request: " + ev)
    ev = dev[1].wait_event(["DPP-TX-STATUS"], timeout=5)
    if ev is None:
        raise Exception("TX status for DPP Authentication Request not reported")
    if "freq=2412 result=SUCCESS" not in ev:
        raise Exception("Unexpected TX status for Authentication Request: " + ev)
    # Authentication Response (type=1) on the negotiated channel 2462.
    ev = dev[0].wait_event(["DPP-TX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Response not sent")
    if "freq=2462 type=1" not in ev:
        raise Exception("Unexpected TX data for Authentication Response: " + ev)
    ev = dev[1].wait_event(["DPP-RX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Response not received")
    if "freq=2462 type=1" not in ev:
        raise Exception("Unexpected RX data for Authentication Response: " + ev)
    ev = dev[0].wait_event(["DPP-TX-STATUS"], timeout=5)
    if ev is None:
        raise Exception("TX status for DPP Authentication Response not reported")
    if "freq=2462 result=SUCCESS" not in ev:
        raise Exception("Unexpected TX status for Authentication Response: " + ev)
    # Authentication Confirm (type=2) stays on the negotiated channel.
    ev = dev[1].wait_event(["DPP-TX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Confirm not sent")
    if "freq=2462 type=2" not in ev:
        raise Exception("Unexpected TX data for Authentication Confirm: " + ev)
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Confirm not received")
    if "freq=2462 type=2" not in ev:
        raise Exception("Unexpected RX data for Authentication Confirm: " + ev)
    ev = dev[1].wait_event(["DPP-TX-STATUS"], timeout=5)
    if ev is None:
        raise Exception("TX status for DPP Authentication Confirm not reported")
    if "freq=2462 result=SUCCESS" not in ev:
        raise Exception("Unexpected TX status for Authentication Confirm: " + ev)
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED", "DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    if "DPP-CONF-FAILED" in ev:
        raise Exception("DPP configuration failed")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
    dev[1].dump_monitor()
def test_dpp_config_legacy(dev, apdev):
    """DPP Config Object for legacy network using passphrase"""
    check_dpp_capab(dev[1])
    # Override the Config Object the Configurator would otherwise generate.
    override = ('{"wi-fi_tech":"infra", "discovery":{"ssid":"test"},'
                '"cred":{"akm":"psk","pass":"secret passphrase"}}')
    dev[1].set("dpp_config_obj_override", override)
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="prime256v1",
                                 require_conf_success=True)
def test_dpp_config_legacy_psk_hex(dev, apdev):
    """DPP Config Object for legacy network using PSK"""
    check_dpp_capab(dev[1])
    # Override with a psk_hex credential (64 hex chars) instead of a pass.
    override = ('{"wi-fi_tech":"infra", "discovery":{"ssid":"test"},'
                '"cred":{"akm":"psk","psk_hex":"%s"}}' % (32 * "12"))
    dev[1].set("dpp_config_obj_override", override)
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="prime256v1",
                                 require_conf_success=True)
def test_dpp_config_fragmentation(dev, apdev):
    """DPP Config Object for legacy network requiring fragmentation"""
    check_dpp_capab(dev[1])
    # Pad the Config Object so the GAS response has to be fragmented.
    override = ('{"wi-fi_tech":"infra", "discovery":{"ssid":"test"},'
                '"cred":{"akm":"psk","pass":"secret passphrase"}}')
    override += 3000 * ' '
    dev[1].set("dpp_config_obj_override", override)
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="prime256v1",
                                 require_conf_success=True)
def test_dpp_config_legacy_gen(dev, apdev):
    """Generate DPP Config Object for legacy network"""
    # Bug fix: str.encode("hex") is a Python 2-only codec and raises
    # LookupError on Python 3; binascii.hexlify() produces the same hex
    # string on both Python versions.
    import binascii
    pass_hex = binascii.hexlify(b"passphrase").decode()
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 init_extra="conf=sta-psk pass=%s" % pass_hex,
                                 require_conf_success=True)
def test_dpp_config_legacy_gen_psk(dev, apdev):
    """Generate DPP Config Object for legacy network (PSK)"""
    # 64-hex-character PSK value.
    psk = "0123456789abcdef" * 4
    run_dpp_qr_code_auth_unicast(dev, apdev, curve="prime256v1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-psk psk=" + psk)
def test_dpp_config_dpp_gen_prime256v1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-256)"""
    # P-256 bootstrapping key; Configurator signing key uses its default curve.
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 configurator=True,
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp384r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-384)"""
    # P-384 bootstrapping key; Configurator signing key uses its default curve.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp384r1",
                                 configurator=True,
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp521r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-521)"""
    # P-521 bootstrapping key; Configurator signing key uses its default curve.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp521r1",
                                 configurator=True,
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_prime256v1_prime256v1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-256 + P-256)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 configurator=True,
                                 conf_curve="prime256v1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_prime256v1_secp384r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-256 + P-384)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 configurator=True,
                                 conf_curve="secp384r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_prime256v1_secp521r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-256 + P-521)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 configurator=True,
                                 conf_curve="secp521r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp384r1_prime256v1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-384 + P-256)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp384r1",
                                 configurator=True,
                                 conf_curve="prime256v1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp384r1_secp384r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-384 + P-384)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp384r1",
                                 configurator=True,
                                 conf_curve="secp384r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp384r1_secp521r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-384 + P-521)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp384r1",
                                 configurator=True,
                                 conf_curve="secp521r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp521r1_prime256v1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-521 + P-256)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp521r1",
                                 configurator=True,
                                 conf_curve="prime256v1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp521r1_secp384r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-521 + P-384)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp521r1",
                                 configurator=True,
                                 conf_curve="secp384r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_secp521r1_secp521r1(dev, apdev):
    """Generate DPP Config Object for DPP network (P-521 + P-521)"""
    # Bootstrapping key curve + Configurator signing key curve combination.
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp521r1",
                                 configurator=True,
                                 conf_curve="secp521r1",
                                 require_conf_success=True,
                                 init_extra="conf=sta-dpp")
def test_dpp_config_dpp_gen_expiry(dev, apdev):
    """Generate DPP Config Object for DPP network with expiry value"""
    # netAccessKey expiry set ~17 minutes into the future, so the generated
    # configuration is still valid and must be accepted.
    expiry = int(time.time() + 1000)
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 init_extra="conf=sta-dpp expiry=%d" % expiry,
                                 require_conf_success=True,
                                 configurator=True)
def test_dpp_config_dpp_gen_expired_key(dev, apdev):
    """Generate DPP Config Object for DPP network with expired key"""
    # The expiry timestamp is already in the past, so the Enrollee must
    # reject the configuration (require_conf_failure=True).
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 init_extra="conf=sta-dpp expiry=%d" % (time.time() - 10),
                                 require_conf_failure=True,
                                 configurator=True)
def test_dpp_config_dpp_override_prime256v1(dev, apdev):
    """DPP Config Object override (P-256)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-generated config object with a P-256 signedConnector and C-sign key.
    # Since it was produced offline, its netAccessKey cannot match the key
    # dev[0] generates at runtime.
    conf = '{"wi-fi_tech":"infra","discovery":{"ssid":"test"},"cred":{"akm":"dpp","signedConnector":"eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJUbkdLaklsTlphYXRyRUFZcmJiamlCNjdyamtMX0FHVldYTzZxOWhESktVIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6InN0YSJ9XSwibmV0QWNjZXNzS2V5Ijp7Imt0eSI6IkVDIiwiY3J2IjoiUC0yNTYiLCJ4IjoiYVRGNEpFR0lQS1NaMFh2OXpkQ01qbS10bjVYcE1zWUlWWjl3eVNBejFnSSIsInkiOiJRR2NIV0FfNnJiVTlYRFhBenRvWC1NNVEzc3VUbk1hcUVoVUx0bjdTU1h3In19._sm6YswxMf6hJLVTyYoU1uYUeY2VVkUNjrzjSiEhY42StD_RWowStEE-9CRsdCvLmsTptZ72_g40vTFwdId20A","csign":{"kty":"EC","crv":"P-256","x":"W4-Y5N1Pkos3UWb9A5qme0KUYRtY3CVUpekx_MapZ9s","y":"Et-M4NSF4NGjvh2VCh4B1sJ9eSCZ4RNzP2DBdP137VE","kid":"TnGKjIlNZaatrEAYrbbjiB67rjkL_AGVWXO6q9hDJKU"}}}'
    # Enrollee must be told to accept the netAccessKey mismatch for the
    # overridden object to be taken into use.
    dev[0].set("dpp_ignore_netaccesskey_mismatch", "1")
    dev[1].set("dpp_config_obj_override", conf)
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 require_conf_success=True)
def test_dpp_config_dpp_override_secp384r1(dev, apdev):
    """DPP Config Object override (P-384)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-generated config object with a P-384 signedConnector and C-sign key;
    # the embedded netAccessKey cannot match the runtime-generated key.
    conf = '{"wi-fi_tech":"infra","discovery":{"ssid":"test"},"cred":{"akm":"dpp","signedConnector":"eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJabi1iMndjbjRLM2pGQklkYmhGZkpVTHJTXzdESS0yMWxFQi02R3gxNjl3IiwiYWxnIjoiRVMzODQifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6InN0YSJ9XSwibmV0QWNjZXNzS2V5Ijp7Imt0eSI6IkVDIiwiY3J2IjoiUC0zODQiLCJ4IjoickdrSGg1UUZsOUtfWjdqYUZkVVhmbThoY1RTRjM1b25Xb1NIRXVsbVNzWW9oX1RXZGpoRjhiVGdiS0ZRN2tBViIsInkiOiJBbU1QVDA5VmFENWpGdzMwTUFKQlp2VkZXeGNlVVlKLXR5blQ0bVJ5N0xOZWxhZ0dEWHpfOExaRlpOU2FaNUdLIn19.Yn_F7m-bbOQ5PlaYQJ9-1qsuqYQ6V-rAv8nWw1COKiCYwwbt3WFBJ8DljY0dPrlg5CHJC4saXwkytpI-CpELW1yUdzYb4Lrun07d20Eo_g10ICyOl5sqQCAUElKMe_Xr","csign":{"kty":"EC","crv":"P-384","x":"dmTyXXiPV2Y8a01fujL-jo08gvzyby23XmzOtzjAiujKQZZgPJsbhfEKrZDlc6ey","y":"H5Z0av5c7bqInxYb2_OOJdNiMhVf3zlcULR0516ZZitOY4U31KhL4wl4KGV7g2XW","kid":"Zn-b2wcn4K3jFBIdbhFfJULrS_7DI-21lEB-6Gx169w"}}}'
    # Enrollee must be told to accept the netAccessKey mismatch.
    dev[0].set("dpp_ignore_netaccesskey_mismatch", "1")
    dev[1].set("dpp_config_obj_override", conf)
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp384r1",
                                 require_conf_success=True)
def test_dpp_config_dpp_override_secp521r1(dev, apdev):
    """DPP Config Object override (P-521)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-generated config object with a P-521 signedConnector and C-sign key;
    # the embedded netAccessKey cannot match the runtime-generated key.
    conf = '{"wi-fi_tech":"infra","discovery":{"ssid":"test"},"cred":{"akm":"dpp","signedConnector":"eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJMZkhKY3hnV2ZKcG1uS2IwenZRT0F2VDB2b0ZKc0JjZnBmYzgxY3Y5ZXFnIiwiYWxnIjoiRVM1MTIifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6InN0YSJ9XSwibmV0QWNjZXNzS2V5Ijp7Imt0eSI6IkVDIiwiY3J2IjoiUC01MjEiLCJ4IjoiQVJlUFBrMFNISkRRR2NWbnlmM3lfbTlaQllHNjFJeElIbDN1NkdwRHVhMkU1WVd4TE1BSUtMMnZuUGtlSGFVRXljRmZaZlpYZ2JlNkViUUxMVkRVUm1VUSIsInkiOiJBWUtaYlNwUkFFNjJVYm9YZ2c1ZWRBVENzbEpzTlpwcm9RR1dUcW9Md04weXkzQkVoT3ZRZmZrOWhaR2lKZ295TzFobXFRRVRrS0pXb2tIYTBCQUpLSGZtIn19.ACEZLyPk13cM_OFScpLoCElQ2t1sxq5z2d_W_3_QslTQQe5SFiH_o8ycL4632YLAH4RV0gZcMKKRMtZdHgBYHjkzASDqgY-_aYN2SBmpfl8hw0YdDlUJWX3DJf-ofqNAlTbnGmhpSg69cEAhFn41Xgvx2MdwYcPVncxxESVOtWl5zNLK","csign":{"kty":"EC","crv":"P-521","x":"ADiOI_YJOAipEXHB-SpGl4KqokX8m8h3BVYCc8dgiwssZ061-nIIY3O1SIO6Re4Jjfy53RPgzDG6jitOgOGLtzZs","y":"AZKggKaQi0ExutSpJAU3-lqDV03sBQLA9C7KabfWoAn8qD6Vk4jU0WAJdt-wBBTF9o1nVuiqS2OxMVYrxN4lOz79","kid":"LfHJcxgWfJpmnKb0zvQOAvT0voFJsBcfpfc81cv9eqg"}}}'
    # Enrollee must be told to accept the netAccessKey mismatch.
    dev[0].set("dpp_ignore_netaccesskey_mismatch", "1")
    dev[1].set("dpp_config_obj_override", conf)
    run_dpp_qr_code_auth_unicast(dev, apdev, "secp521r1",
                                 require_conf_success=True)
def test_dpp_config_override_objects(dev, apdev):
    """Generate DPP Config Object and override objects"""
    # (Docstring typo fixed: stray closing parenthesis removed.)
    check_dpp_capab(dev[1])
    # Override only the discovery and groups sub-objects; the rest of the
    # config object is generated normally by the Configurator.
    discovery = '{\n"ssid":"mywifi"\n}'
    groups = '[\n {"groupId":"home","netRole":"sta"},\n {"groupId":"cottage","netRole":"sta"}\n]'
    dev[1].set("dpp_discovery_override", discovery)
    dev[1].set("dpp_groups_override", groups)
    run_dpp_qr_code_auth_unicast(dev, apdev, "prime256v1",
                                 init_extra="conf=sta-dpp",
                                 require_conf_success=True,
                                 configurator=True)
def _dpp_fwd_mgmt_rx(dev):
    """Receive one externally-handled mgmt frame on dev and feed it back in."""
    # binascii.hexlify() replaces the Python-2-only str.encode('hex').
    import binascii
    msg = dev.mgmt_rx()
    frame = binascii.hexlify(msg['frame']).decode()
    req = "MGMT_RX_PROCESS freq={} datarate={} ssi_signal={} frame={}".format(
        msg['freq'], msg['datarate'], msg['ssi_signal'], frame)
    if "OK" not in dev.request(req):
        raise Exception("MGMT_RX_PROCESS failed")

def test_dpp_gas_timeout(dev, apdev):
    """DPP and GAS server timeout for a query"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 initiates DPP Authentication")
    # Handle mgmt frames in the test script so that the final GAS comeback
    # response can be dropped to force a timeout on the GAS server.
    dev[0].set("ext_mgmt_frame_handling", "1")
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # Force GAS fragmentation
    conf = '{"wi-fi_tech":"infra", "discovery":{"ssid":"test"},"cred":{"akm":"psk","pass":"secret passphrase"}}' + 3000*' '
    dev[1].set("dpp_config_obj_override", conf)
    cmd = "DPP_AUTH_INIT peer=%d" % id1
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    # DPP Authentication Request
    _dpp_fwd_mgmt_rx(dev[0])
    # DPP Authentication Confirmation
    _dpp_fwd_mgmt_rx(dev[0])
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    # DPP Configuration Response (GAS Initial Response frame)
    _dpp_fwd_mgmt_rx(dev[0])
    # GAS Comeback Response frame
    msg = dev[0].mgmt_rx()
    # Do not continue to force timeout on GAS server
    ev = dev[0].wait_event(["GAS-QUERY-DONE"], timeout=10)
    if ev is None:
        raise Exception("GAS result not reported (Enrollee)")
    if "result=TIMEOUT" not in ev:
        raise Exception("Unexpected GAS result (Enrollee): " + ev)
    dev[0].set("ext_mgmt_frame_handling", "0")
    ev = dev[1].wait_event(["DPP-CONF-FAILED"], timeout=15)
    if ev is None:
        raise Exception("DPP configuration failure not reported (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-FAILED"], timeout=1)
    if ev is None:
        raise Exception("DPP configuration failure not reported (Enrollee)")
def test_dpp_akm_sha256(dev, apdev):
    """DPP AKM (SHA256)"""
    # 32-octet PMK
    run_dpp_akm(dev, apdev, pmk_len=32)
def test_dpp_akm_sha384(dev, apdev):
    """DPP AKM (SHA384)"""
    # 48-octet PMK
    run_dpp_akm(dev, apdev, pmk_len=48)
def test_dpp_akm_sha512(dev, apdev):
    """DPP AKM (SHA512)"""
    # 64-octet PMK
    run_dpp_akm(dev, apdev, pmk_len=64)
def run_dpp_akm(dev, apdev, pmk_len):
    """Verify DPP AKM association using manually installed PMKSA entries.

    pmk_len selects the PMK length in octets (32/48/64 for the
    SHA256/SHA384/SHA512 variants). First confirms that association is
    rejected when only the STA has a PMKSA entry, then adds a matching
    entry on the AP side and expects a successful connection.
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    params = { "ssid": "dpp",
               "wpa": "2",
               "wpa_key_mgmt": "DPP",
               "rsn_pairwise": "CCMP",
               "ieee80211w": "2" }
    try:
        hapd = hostapd.add_ap(apdev[0], params)
    except:
        raise HwsimSkip("DPP not supported")
    # Without a PMKSA entry there is nothing to authenticate with, so the
    # network must be treated as not found.
    id = dev[0].connect("dpp", key_mgmt="DPP", ieee80211w="2", scan_freq="2412",
                        wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND"], timeout=2)
    if not ev:
        raise Exception("Network mismatch not reported")
    dev[0].request("DISCONNECT")
    dev[0].dump_monitor()
    bssid = hapd.own_addr()
    pmkid = 16*'11'          # arbitrary 16-octet PMKID (hex)
    akmp = 2**23             # key_mgmt bitfield value used for the DPP AKM
    pmk = pmk_len*'22'       # arbitrary PMK of the requested length (hex)
    cmd = "PMKSA_ADD %d %s %s %s 30240 43200 %d 0" % (id, bssid, pmkid, pmk, akmp)
    if "OK" not in dev[0].request(cmd):
        raise Exception("PMKSA_ADD failed (wpa_supplicant)")
    # STA-only PMKSA: AP has no matching entry, so association is rejected
    # with status code 53 (invalid PMKID).
    dev[0].select_network(id, freq="2412")
    ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=2)
    dev[0].request("DISCONNECT")
    dev[0].dump_monitor()
    if not ev:
        raise Exception("Association attempt was not rejected")
    if "status_code=53" not in ev:
        raise Exception("Unexpected status code: " + ev)
    # Install the matching PMKSA entry on the AP and retry; this time the
    # connection must complete with key_mgmt=DPP.
    addr = dev[0].own_addr()
    cmd = "PMKSA_ADD %s %s %s 0 %d" % (addr, pmkid, pmk, akmp)
    if "OK" not in hapd.request(cmd):
        raise Exception("PMKSA_ADD failed (hostapd)")
    dev[0].select_network(id, freq="2412")
    dev[0].wait_connected()
    val = dev[0].get_status_field("key_mgmt")
    if val != "DPP":
        raise Exception("Unexpected key_mgmt: " + val)
def test_dpp_network_introduction(dev, apdev):
    """DPP network introduction"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-generated P-256 credentials: a shared C-sign key plus matching
    # Connector/netAccessKey pairs for the AP and the STA roles.
    csign = "3059301306072a8648ce3d020106082a8648ce3d03010703420004d02e5bd81a120762b5f0f2994777f5d40297238a6c294fd575cdf35fabec44c050a6421c401d98d659fd2ed13c961cc8287944dd3202f516977800d3ab2f39ee"
    ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJzOEFrYjg5bTV4UGhoYk5UbTVmVVo0eVBzNU5VMkdxYXNRY3hXUWhtQVFRIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIwOHF4TlNYRzRWemdCV3BjVUdNSmc1czNvbElOVFJsRVQ1aERpNkRKY3ZjIiwieSI6IlVhaGFYQXpKRVpRQk1YaHRUQnlZZVlrOWtJYjk5UDA3UV9NcW9TVVZTVEkifX0.a5_nfMVr7Qe1SW0ZL3u6oQRm5NUCYUSfixDAJOUFN3XUfECBZ6E8fm8xjeSfdOytgRidTz0CTlIRjzPQo82dmQ"
    ap_netaccesskey = "30770201010420f6531d17f29dfab655b7c9e923478d5a345164c489aadd44a3519c3e9dcc792da00a06082a8648ce3d030107a14403420004d3cab13525c6e15ce0056a5c506309839b37a2520d4d19444f98438ba0c972f751a85a5c0cc911940131786d4c1c9879893d9086fdf4fd3b43f32aa125154932"
    sta_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJzOEFrYjg5bTV4UGhoYk5UbTVmVVo0eVBzNU5VMkdxYXNRY3hXUWhtQVFRIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6InN0YSJ9XSwibmV0QWNjZXNzS2V5Ijp7Imt0eSI6IkVDIiwiY3J2IjoiUC0yNTYiLCJ4IjoiZWMzR3NqQ3lQMzVBUUZOQUJJdEltQnN4WXVyMGJZX1dES1lfSE9zUGdjNCIsInkiOiJTRS1HVllkdWVnTFhLMU1TQXZNMEx2QWdLREpTNWoyQVhCbE9PMTdUSTRBIn19.PDK9zsGlK-e1pEOmNxVeJfCS8pNeay6ckIS1TXCQsR64AR-9wFPCNVjqOxWvVKltehyMFqVAtOcv0IrjtMJFqQ"
    sta_netaccesskey = "30770201010420bc33380c26fd2168b69cd8242ed1df07ba89aa4813f8d4e8523de6ca3f8dd28ba00a06082a8648ce3d030107a1440342000479cdc6b230b23f7e40405340048b48981b3162eaf46d8fd60ca63f1ceb0f81ce484f8655876e7a02d72b531202f3342ef020283252e63d805c194e3b5ed32380"
    params = { "ssid": "dpp",
               "wpa": "2",
               "wpa_key_mgmt": "DPP",
               "ieee80211w": "2",
               "rsn_pairwise": "CCMP",
               "dpp_connector": ap_connector,
               "dpp_csign": csign,
               "dpp_netaccesskey": ap_netaccesskey }
    try:
        hapd = hostapd.add_ap(apdev[0], params)
    except:
        raise HwsimSkip("DPP not supported")
    # STA configured with the matching Connector should complete network
    # introduction and connect with key_mgmt=DPP.
    id = dev[0].connect("dpp", key_mgmt="DPP", scan_freq="2412",
                        ieee80211w="2",
                        dpp_csign=csign,
                        dpp_connector=sta_connector,
                        dpp_netaccesskey=sta_netaccesskey)
    val = dev[0].get_status_field("key_mgmt")
    if val != "DPP":
        raise Exception("Unexpected key_mgmt: " + val)
def test_dpp_ap_config(dev, apdev):
    """DPP and AP configuration"""
    # Default curves for both the bootstrapping and the C-sign keys.
    run_dpp_ap_config(dev, apdev, curve=None, conf_curve=None)
def test_dpp_ap_config_p256_p256(dev, apdev):
    """DPP and AP configuration (P-256 + P-256)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-256", curve="P-256")
def test_dpp_ap_config_p256_p384(dev, apdev):
    """DPP and AP configuration (P-256 + P-384)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-384", curve="P-256")
def test_dpp_ap_config_p256_p521(dev, apdev):
    """DPP and AP configuration (P-256 + P-521)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-521", curve="P-256")
def test_dpp_ap_config_p384_p256(dev, apdev):
    """DPP and AP configuration (P-384 + P-256)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-256", curve="P-384")
def test_dpp_ap_config_p384_p384(dev, apdev):
    """DPP and AP configuration (P-384 + P-384)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-384", curve="P-384")
def test_dpp_ap_config_p384_p521(dev, apdev):
    """DPP and AP configuration (P-384 + P-521)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-521", curve="P-384")
def test_dpp_ap_config_p521_p256(dev, apdev):
    """DPP and AP configuration (P-521 + P-256)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-256", curve="P-521")
def test_dpp_ap_config_p521_p384(dev, apdev):
    """DPP and AP configuration (P-521 + P-384)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-384", curve="P-521")
def test_dpp_ap_config_p521_p521(dev, apdev):
    """DPP and AP configuration (P-521 + P-521)"""
    run_dpp_ap_config(dev, apdev, conf_curve="P-521", curve="P-521")
def update_hapd_config(hapd):
    """Reconfigure hapd with the DPP credentials it received as Enrollee."""
    # Collect the four provisioning events in the order they are emitted.
    expected = [("DPP-CONFOBJ-SSID", "SSID"),
                ("DPP-CONNECTOR", "Connector"),
                ("DPP-C-SIGN-KEY", "C-sign-key"),
                ("DPP-NET-ACCESS-KEY", "netAccessKey")]
    fields = []
    for tag, desc in expected:
        ev = hapd.wait_event([tag], timeout=1)
        if ev is None:
            raise Exception("%s not reported (AP)" % desc)
        fields.append(ev.split(' '))
    ssid = fields[0][1]
    connector = fields[1][1]
    csign = fields[2][1]
    nak = fields[3]
    net_access_key = nak[1]
    # Expiry is optional; present only when the event carries a third token.
    net_access_key_expiry = nak[2] if len(nak) > 2 else None
    logger.info("Update AP configuration to use key_mgmt=DPP")
    hapd.disable()
    hapd.set("ssid", ssid)
    hapd.set("wpa", "2")
    hapd.set("wpa_key_mgmt", "DPP")
    hapd.set("ieee80211w", "2")
    hapd.set("rsn_pairwise", "CCMP")
    hapd.set("dpp_connector", connector)
    hapd.set("dpp_csign", csign)
    hapd.set("dpp_netaccesskey", net_access_key)
    if net_access_key_expiry:
        hapd.set("dpp_netaccesskey_expiry", net_access_key_expiry)
    hapd.enable()
def run_dpp_ap_config(dev, apdev, curve=None, conf_curve=None):
    """Provision an AP and then a STA over DPP and verify the connection.

    dev[0] acts as the Configurator: it first configures the AP started on
    apdev[0] (conf=ap-dpp) and then enrolls dev[1] (conf=sta-dpp). curve
    selects the bootstrapping key curve and conf_curve the Configurator
    signing key curve; None uses the implementation defaults.
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
    check_dpp_capab(hapd)
    # AP generates and "displays" its bootstrapping QR code.
    addr = hapd.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    if curve:
        cmd += " curve=" + curve
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id_h = int(res)
    uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id_h)
    cmd = "DPP_CONFIGURATOR_ADD"
    if conf_curve:
        cmd += " curve=" + conf_curve
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    # Configurator scans the AP's QR code and provisions it.
    res = dev[0].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id = int(res)
    cmd = "DPP_AUTH_INIT peer=%d conf=ap-dpp configurator=%d" % (id, conf_id)
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED", "DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    if "DPP-CONF-FAILED" in ev:
        raise Exception("DPP configuration failed")
    update_hapd_config(hapd)
    # STA generates its own QR code and gets enrolled by the Configurator.
    addr = dev[1].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    if curve:
        cmd += " curve=" + curve
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
    res = dev[0].request("DPP_QR_CODE " + uri1)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id0b, conf_id)
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[1].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[1].request("DPP_STOP_LISTEN")
    # Extract the provisioned credentials from the STA's events.
    ev = dev[1].wait_event(["DPP-CONFOBJ-SSID"], timeout=1)
    if ev is None:
        raise Exception("SSID not reported")
    ssid = ev.split(' ')[1]
    ev = dev[1].wait_event(["DPP-CONNECTOR"], timeout=1)
    if ev is None:
        raise Exception("Connector not reported")
    connector = ev.split(' ')[1]
    ev = dev[1].wait_event(["DPP-C-SIGN-KEY"], timeout=1)
    if ev is None:
        raise Exception("C-sign-key not reported")
    p = ev.split(' ')
    csign = p[1]
    ev = dev[1].wait_event(["DPP-NET-ACCESS-KEY"], timeout=1)
    if ev is None:
        raise Exception("netAccessKey not reported")
    p = ev.split(' ')
    net_access_key = p[1]
    net_access_key_expiry = p[2] if len(p) > 2 else None
    dev[1].dump_monitor()
    # Build the network profile manually and verify the data connection.
    id = dev[1].connect(ssid, key_mgmt="DPP", ieee80211w="2", scan_freq="2412",
                        only_add_network=True)
    dev[1].set_network_quoted(id, "dpp_connector", connector)
    dev[1].set_network(id, "dpp_csign", csign)
    dev[1].set_network(id, "dpp_netaccesskey", net_access_key)
    if net_access_key_expiry:
        dev[1].set_network(id, "dpp_netaccess_expiry", net_access_key_expiry)
    logger.info("Check data connection")
    dev[1].select_network(id, freq="2412")
    dev[1].wait_connected()
def test_dpp_auto_connect_1(dev, apdev):
    """DPP and auto connect (1)"""
    try:
        run_dpp_auto_connect(dev, apdev, processing=1)
    finally:
        # Always restore the default config-processing behavior.
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_2(dev, apdev):
    """DPP and auto connect (2)"""
    try:
        run_dpp_auto_connect(dev, apdev, processing=2)
    finally:
        # Always restore the default config-processing behavior.
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_2_connect_cmd(dev, apdev):
    """DPP and auto connect (2) using connect_cmd"""
    # Run the Enrollee on a dedicated interface that uses the connect_cmd
    # (driver-based SME) code path.
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")
    try:
        run_dpp_auto_connect([ wpas, dev[1] ], apdev, 2)
    finally:
        wpas.set("dpp_config_processing", "0")
def run_dpp_auto_connect(dev, apdev, processing):
    """DPP provisioning with automatic network profile processing.

    processing is the dpp_config_processing value for the Enrollee
    (1 = create profile only, 2 = create profile and connect
    automatically).
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-generated P-256 Configurator signing key (private + public) and a
    # matching AP Connector/netAccessKey, so the AP accepts Connectors
    # signed by this Configurator.
    csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
    csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
    ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
    ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
    params = { "ssid": "test",
               "wpa": "2",
               "wpa_key_mgmt": "DPP",
               "ieee80211w": "2",
               "rsn_pairwise": "CCMP",
               "dpp_connector": ap_connector,
               "dpp_csign": csign_pub,
               "dpp_netaccesskey": ap_netaccesskey }
    try:
        hapd = hostapd.add_ap(apdev[0], params)
    except:
        raise HwsimSkip("DPP not supported")
    # Configurator (dev[1]) reuses the same signing key so the generated STA
    # Connector is trusted by the AP.
    cmd = "DPP_CONFIGURATOR_ADD key=" + csign
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("DPP_CONFIGURATOR_ADD failed")
    conf_id = int(res)
    dev[0].set("dpp_config_processing", str(processing))
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=10)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=2)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    ev = dev[0].wait_event(["DPP-NETWORK-ID"], timeout=1)
    if ev is None:
        raise Exception("DPP network profile not generated")
    id = ev.split(' ')[1]
    # With processing=1 the profile is only added, so connect explicitly;
    # with processing=2 wpa_supplicant connects on its own.
    if processing == 1:
        dev[0].select_network(id, freq=2412)
    dev[0].wait_connected()
    hwsim_utils.test_connectivity(dev[0], hapd)
def test_dpp_auto_connect_legacy(dev, apdev):
    """DPP and auto connect (legacy)"""
    try:
        run_dpp_auto_connect_legacy(dev, apdev, conf='sta-psk')
    finally:
        # Always restore the default config-processing behavior.
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_legacy_sae_1(dev, apdev):
    """DPP and auto connect (legacy SAE)"""
    try:
        run_dpp_auto_connect_legacy(dev, apdev, psk_sae=True, conf='sta-sae')
    finally:
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_legacy_sae_2(dev, apdev):
    """DPP and auto connect (legacy SAE)"""
    try:
        run_dpp_auto_connect_legacy(dev, apdev, sae_only=True, conf='sta-sae')
    finally:
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_legacy_psk_sae_1(dev, apdev):
    """DPP and auto connect (legacy PSK+SAE)"""
    try:
        run_dpp_auto_connect_legacy(dev, apdev, psk_sae=True,
                                    conf='sta-psk-sae')
    finally:
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_legacy_psk_sae_2(dev, apdev):
    """DPP and auto connect (legacy PSK+SAE)"""
    try:
        run_dpp_auto_connect_legacy(dev, apdev, sae_only=True,
                                    conf='sta-psk-sae')
    finally:
        dev[0].set("dpp_config_processing", "0")
def test_dpp_auto_connect_legacy_psk_sae_3(dev, apdev):
    """DPP and auto connect (legacy PSK+SAE)"""
    try:
        # AP in default PSK mode while the profile allows both PSK and SAE.
        run_dpp_auto_connect_legacy(dev, apdev, conf="sta-psk-sae")
    finally:
        dev[0].set("dpp_config_processing", "0")
def run_dpp_auto_connect_legacy(dev, apdev, conf='sta-psk',
                                psk_sae=False, sae_only=False):
    """DPP provisioning of a legacy (PSK/SAE) profile with auto-connect.

    conf selects the Configurator's conf= role; psk_sae/sae_only adjust the
    AP's key management so the different profile variants can be exercised.
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    params = hostapd.wpa2_params(ssid="dpp-legacy",
                                 passphrase="secret passphrase")
    if sae_only:
        params['wpa_key_mgmt'] = 'SAE'
        params['ieee80211w'] = '2'
    elif psk_sae:
        params['wpa_key_mgmt'] = 'WPA-PSK SAE'
        params['ieee80211w'] = '1'
        params['sae_require_mfp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    # Enrollee creates the profile and connects automatically.
    dev[0].set("dpp_config_processing", "2")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # str.encode("hex") exists only on Python 2; binascii works on 2 and 3.
    import binascii
    ssid_hex = binascii.hexlify(b"dpp-legacy").decode()
    pass_hex = binascii.hexlify(b"secret passphrase").decode()
    cmd = "DPP_AUTH_INIT peer=%d conf=%s ssid=%s pass=%s" % (id1, conf,
                                                             ssid_hex,
                                                             pass_hex)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=10)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=2)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    ev = dev[0].wait_event(["DPP-NETWORK-ID"], timeout=1)
    if ev is None:
        raise Exception("DPP network profile not generated")
    dev[0].wait_connected()
def test_dpp_auto_connect_legacy_pmf_required(dev, apdev):
    """DPP and auto connect (legacy, PMF required)"""
    try:
        run_dpp_auto_connect_legacy_pmf_required(dev, apdev)
    finally:
        # Always restore the default config-processing behavior.
        dev[0].set("dpp_config_processing", "0")
def run_dpp_auto_connect_legacy_pmf_required(dev, apdev):
    """DPP legacy provisioning against an AP that requires PMF.

    The AP uses WPA-PSK-SHA256 with ieee80211w=2; the DPP-generated legacy
    profile must still allow the Enrollee to connect automatically.
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    params = hostapd.wpa2_params(ssid="dpp-legacy",
                                 passphrase="secret passphrase")
    params['wpa_key_mgmt'] = "WPA-PSK-SHA256"
    params['ieee80211w'] = "2"
    hapd = hostapd.add_ap(apdev[0], params)
    # Enrollee creates the profile and connects automatically.
    dev[0].set("dpp_config_processing", "2")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # str.encode("hex") exists only on Python 2; binascii works on 2 and 3.
    import binascii
    ssid_hex = binascii.hexlify(b"dpp-legacy").decode()
    pass_hex = binascii.hexlify(b"secret passphrase").decode()
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-psk ssid=%s pass=%s" % (id1,
                                                                  ssid_hex,
                                                                  pass_hex)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=10)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=2)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    ev = dev[0].wait_event(["DPP-NETWORK-ID"], timeout=1)
    if ev is None:
        raise Exception("DPP network profile not generated")
    dev[0].wait_connected()
def test_dpp_qr_code_auth_responder_configurator(dev, apdev):
    """DPP QR Code and responder as the configurator"""
    # (Stray trailing semicolons removed.)
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    cmd = "DPP_CONFIGURATOR_ADD"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    # Responder acts as the Configurator, so store the parameters it should
    # use when the Initiator enrolls.
    dev[0].set("dpp_configurator_params", " conf=sta-dpp configurator=%d" % conf_id)
    cmd = "DPP_LISTEN 2412 role=configurator"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[1].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
    dev[1].dump_monitor()
def test_dpp_qr_code_hostapd_init(dev, apdev):
    """DPP QR Code and hostapd as initiator"""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "6" })
    check_dpp_capab(hapd)
    # dev[0] acts as the configurator/responder
    res = dev[0].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    # Configuration parameters to send once hostapd initiates
    dev[0].set("dpp_configurator_params",
               " conf=ap-dpp configurator=%d" % conf_id)
    cmd = "DPP_LISTEN 2437 role=configurator"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    res = hapd.request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1
    if "OK" not in hapd.request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_qr_code_hostapd_init_offchannel(dev, apdev):
    """DPP QR Code and hostapd as initiator (offchannel)"""
    run_dpp_qr_code_hostapd_init_offchannel(dev, apdev, extra=None)
def test_dpp_qr_code_hostapd_init_offchannel_neg_freq(dev, apdev):
    """DPP QR Code and hostapd as initiator (offchannel, neg_freq)"""
    run_dpp_qr_code_hostapd_init_offchannel(dev, apdev, extra="neg_freq=2437")
def run_dpp_qr_code_hostapd_init_offchannel(dev, apdev, extra):
    """Run DPP exchange with hostapd initiating on an offchannel frequency.

    extra, when not None, is appended to the DPP_AUTH_INIT command (e.g. a
    neg_freq=<MHz> parameter)."""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "6" })
    check_dpp_capab(hapd)
    res = dev[0].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    addr = dev[0].own_addr().replace(':', '')
    # Two bootstrap channels; the responder listens on the second one (2462)
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1,81/11 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    dev[0].set("dpp_configurator_params",
               " conf=ap-dpp configurator=%d" % conf_id)
    cmd = "DPP_LISTEN 2462 role=configurator"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    res = hapd.request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1
    if extra:
        cmd += " " + extra
    if "OK" not in hapd.request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_test_vector_p_256(dev, apdev):
    """DPP P-256 test vector (mutual auth)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Fixed keys/nonces so the exchange matches a known test vector;
    # dev[0] is the responder, dev[1] the initiator.
    # Responder bootstrapping key
    priv = "54ce181a98525f217216f59b245f60e9df30ac7f6b26c939418cfc3c42d1afa0"
    addr = dev[0].own_addr().replace(':', '')
    # key= takes a DER-encoded private key: fixed prefix + raw key + suffix
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/11 mac=" + addr + " key=30310201010420" + priv + "a00a06082a8648ce3d030107"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    # Responder protocol keypair override
    priv = "f798ed2e19286f6a6efe210b1863badb99af2a14b497634dbfd2a97394fb5aa5"
    dev[0].set("dpp_protocol_key_override",
               "30310201010420" + priv + "a00a06082a8648ce3d030107")
    dev[0].set("dpp_nonce_override", "3d0cfb011ca916d796f7029ff0b43393")
    # Initiator bootstrapping key
    priv = "15b2a83c5a0a38b61f2aa8200ee4994b8afdc01c58507d10d0a38f7eedf051bb"
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode key=30310201010420" + priv + "a00a06082a8648ce3d030107"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
    # Initiator protocol keypair override
    priv = "a87de9afbb406c96e5f79a3df895ecac3ad406f95da66314c8cb3165e0c61783"
    dev[1].set("dpp_protocol_key_override",
               "30310201010420" + priv + "a00a06082a8648ce3d030107")
    dev[1].set("dpp_nonce_override", "13f4602a16daeb69712263b9c46cba31")
    # Exchange both URIs so mutual authentication can be used
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1peer = int(res)
    res = dev[0].request("DPP_QR_CODE " + uri1)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0peer = int(res)
    cmd = "DPP_LISTEN 2462 qr=mutual"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d own=%d neg_freq=2412" % (id1peer, id1)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate operation")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
def test_dpp_test_vector_p_256_b(dev, apdev):
    """DPP P-256 test vector (Responder-only auth)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Same fixed keys as the mutual-auth variant, but only the responder's
    # URI is exchanged (no qr=mutual on DPP_LISTEN).
    # Responder bootstrapping key
    priv = "54ce181a98525f217216f59b245f60e9df30ac7f6b26c939418cfc3c42d1afa0"
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/11 mac=" + addr + " key=30310201010420" + priv + "a00a06082a8648ce3d030107"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    # Responder protocol keypair override
    priv = "f798ed2e19286f6a6efe210b1863badb99af2a14b497634dbfd2a97394fb5aa5"
    dev[0].set("dpp_protocol_key_override",
               "30310201010420" + priv + "a00a06082a8648ce3d030107")
    dev[0].set("dpp_nonce_override", "3d0cfb011ca916d796f7029ff0b43393")
    # Initiator bootstrapping key
    priv = "15b2a83c5a0a38b61f2aa8200ee4994b8afdc01c58507d10d0a38f7eedf051bb"
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode key=30310201010420" + priv + "a00a06082a8648ce3d030107"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
    # Initiator protocol keypair override
    priv = "a87de9afbb406c96e5f79a3df895ecac3ad406f95da66314c8cb3165e0c61783"
    dev[1].set("dpp_protocol_key_override",
               "30310201010420" + priv + "a00a06082a8648ce3d030107")
    dev[1].set("dpp_nonce_override", "13f4602a16daeb69712263b9c46cba31")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1peer = int(res)
    cmd = "DPP_LISTEN 2462"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d own=%d neg_freq=2412" % (id1peer, id1)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate operation")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
def der_priv_key_p_521(priv):
    """Wrap a raw P-521 private key (hex string, 66 octets) in the fixed
    DER encoding used by the DPP control interface key= parameter."""
    if len(priv) != 2 * 66:
        raise Exception("Unexpected der_priv_key_p_521 parameter: " + priv)
    return "".join(["3081500201010442", priv, "a00706052b81040023"])
def test_dpp_test_vector_p_521(dev, apdev):
    """DPP P-521 test vector (mutual auth)"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Fixed P-521 keys/nonces; dev[0] is the responder, dev[1] the
    # initiator. der_priv_key_p_521() adds the DER wrapping.
    # Responder bootstrapping key
    priv = "0061e54f518cdf859735da3dd64c6f72c2f086f41a6fd52915152ea2fe0f24ddaecd8883730c9c9fd82cf7c043a41021696388cf5190b731dd83638bcd56d8b6c743"
    addr = dev[0].own_addr().replace(':', '')
    #cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/11 mac=" + addr + " key=" + der_prefix + priv + der_postfix
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/11 mac=" + addr + " key=" + der_priv_key_p_521(priv)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    # Responder protocol keypair override
    priv = "01d8b7b17cd1b0a33f7c66fb4220999329cdaf4f8b44b2ffadde8ab8ed8abffa9f5358c5b1caae26709ca4fb78e52a4d08f2e4f24111a36a6f440d20a0000ff51597"
    dev[0].set("dpp_protocol_key_override", der_priv_key_p_521(priv))
    dev[0].set("dpp_nonce_override",
               "d749a782012eb0a8595af30b2dfc8d0880d004ebddb55ecc5afbdef18c400e01")
    # Initiator bootstrapping key
    priv = "0060c10df14af5ef27f6e362d31bdd9eeb44be77a323ba64b08f3f03d58b92cbfe05c182a91660caa081ca344243c47b5aa088bcdf738840eb35f0218b9f26881e02"
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode key=" + der_priv_key_p_521(priv)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
    # Initiator protocol keypair override
    priv = "019c1c08caaeec38fb931894699b095bc3ab8c1ec7ef0622d2e3eba821477c8c6fca41774f21166ad98aebda37c067d9aa08a8a2e1b5c44c61f2bae02a61f85d9661"
    dev[1].set("dpp_protocol_key_override", der_priv_key_p_521(priv))
    dev[1].set("dpp_nonce_override",
               "de972af3847bec3ba2aedd9f5c21cfdec7bf0bc5fe8b276cbcd0267807fb15b0")
    # Exchange both URIs for mutual authentication
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1peer = int(res)
    res = dev[0].request("DPP_QR_CODE " + uri1)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0peer = int(res)
    cmd = "DPP_LISTEN 2462 qr=mutual"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d own=%d neg_freq=2412" % (id1peer, id1)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate operation")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
def test_dpp_pkex(dev, apdev):
    """DPP and PKEX"""
    # Default curve (no explicit curve= argument)
    run_dpp_pkex(dev, apdev)
def test_dpp_pkex_p256(dev, apdev):
    """DPP and PKEX (P-256)"""
    run_dpp_pkex(dev, apdev, curve="P-256")
def test_dpp_pkex_p384(dev, apdev):
    """DPP and PKEX (P-384)"""
    run_dpp_pkex(dev, apdev, curve="P-384")
def test_dpp_pkex_p521(dev, apdev):
    """DPP and PKEX (P-521)"""
    run_dpp_pkex(dev, apdev, curve="P-521")
def test_dpp_pkex_bp256(dev, apdev):
    """DPP and PKEX (BP-256)"""
    run_dpp_pkex(dev, apdev, curve="brainpoolP256r1")
def test_dpp_pkex_bp384(dev, apdev):
    """DPP and PKEX (BP-384)"""
    run_dpp_pkex(dev, apdev, curve="brainpoolP384r1")
def test_dpp_pkex_bp512(dev, apdev):
    """DPP and PKEX (BP-512)"""
    run_dpp_pkex(dev, apdev, curve="brainpoolP512r1")
def test_dpp_pkex_config(dev, apdev):
    """DPP and PKEX with initiator as the configurator"""
    check_dpp_capab(dev[1])
    # dev[1] (the PKEX initiator in run_dpp_pkex) acts as the configurator
    res = dev[1].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    run_dpp_pkex(dev, apdev,
                 init_extra="conf=sta-dpp configurator=%d" % (conf_id),
                 check_config=True)
def run_dpp_pkex(dev, apdev, curve=None, init_extra="", check_config=False):
    # dev[0] is the PKEX responder and dev[1] the initiator. init_extra is
    # appended to the initiator's DPP_PKEX_ADD command; with
    # check_config=True the configuration exchange is verified as well.
    # Brainpool curves need extra capability support.
    check_dpp_capab(dev[0], curve and "brainpool" in curve)
    check_dpp_capab(dev[1], curve and "brainpool" in curve)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    if curve:
        cmd += " curve=" + curve
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    if curve:
        cmd += " curve=" + curve
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    # Responder must be configured and listening before the initiator starts
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 %s code=secret" % (id1, init_extra)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    if check_config:
        ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
        if ev is None:
            raise Exception("DPP configuration not completed (Configurator)")
        ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
        if ev is None:
            raise Exception("DPP configuration not completed (Enrollee)")
def test_dpp_pkex_5ghz(dev, apdev):
    """DPP and PKEX on 5 GHz"""
    try:
        # 5 GHz operation requires a regulatory domain that allows it
        for d in dev[0:2]:
            d.request("SET country US")
        ev = dev[0].wait_event(["CTRL-EVENT-REGDOM-CHANGE"], timeout=1)
        if ev is None:
            ev = dev[0].wait_global_event(["CTRL-EVENT-REGDOM-CHANGE"],
                                          timeout=1)
        run_dpp_pkex_5ghz(dev, apdev)
    finally:
        # Restore world regulatory domain regardless of test outcome
        for d in dev[0:2]:
            d.request("SET country 00")
        subprocess.call(['iw', 'reg', 'set', '00'])
def run_dpp_pkex_5ghz(dev, apdev):
    # PKEX exchange on a 5 GHz channel (5745 MHz); dev[0] responds,
    # dev[1] initiates.
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 5745"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to set PKEX data (initiator)") if False else None
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=secret" % (id1)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    # Longer timeout: the initiator may need several channel iterations
    # before finding the responder on 5 GHz (presumably; not shown here)
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS", "DPP-FAIL"], timeout=20)
    if ev is None or "DPP-AUTH-SUCCESS" not in ev:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
def test_dpp_pkex_test_vector(dev, apdev):
    """DPP and PKEX (P-256) test vector"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # All keys, MAC addresses, nonces, identifier, and code are fixed so
    # that the exchange reproduces a known test vector.
    init_addr = "ac:64:91:f4:52:07"
    resp_addr = "6e:5e:ce:6e:f3:dd"
    identifier = "joes_key"
    code = "thisisreallysecret"
    # Initiator bootstrapping private key
    init_priv = "5941b51acfc702cdc1c347264beb2920db88eb1a0bf03a211868b1632233c269"
    # Responder bootstrapping private key
    resp_priv = "2ae8956293f49986b6d0b8169a86805d9232babb5f6813fdfe96f19d59536c60"
    # Initiator x/X keypair override
    init_x_priv = "8365c5ed93d751bef2d92b410dc6adfd95670889183fac1bd66759ad85c3187a"
    # Responder y/Y keypair override
    resp_y_priv = "d98faa24d7dd3f592665d71a95c862bfd02c4c48acb0c515a41cbc6e929675ea"
    # DER wrapping for a raw P-256 private key
    p256_prefix = "30310201010420"
    p256_postfix = "a00a06082a8648ce3d030107"
    # Override the MAC addresses used in the PKEX computation on both ends
    dev[0].set("dpp_pkex_own_mac_override", resp_addr)
    dev[0].set("dpp_pkex_peer_mac_override", init_addr)
    dev[1].set("dpp_pkex_own_mac_override", init_addr)
    dev[1].set("dpp_pkex_peer_mac_override", resp_addr)
    # Responder bootstrapping key
    cmd = "DPP_BOOTSTRAP_GEN type=pkex key=" + p256_prefix + resp_priv + p256_postfix
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    # Responder y/Y keypair override
    dev[0].set("dpp_pkex_ephemeral_key_override",
               p256_prefix + resp_y_priv + p256_postfix)
    # Initiator bootstrapping key
    cmd = "DPP_BOOTSTRAP_GEN type=pkex key=" + p256_prefix + init_priv + p256_postfix
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    # Initiator x/X keypair override
    dev[1].set("dpp_pkex_ephemeral_key_override",
               p256_prefix + init_x_priv + p256_postfix)
    cmd = "DPP_PKEX_ADD own=%d identifier=%s code=%s" % (id0, identifier, code)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_PKEX_ADD own=%d identifier=%s init=1 code=%s" % (id1, identifier, code)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
def test_dpp_pkex_code_mismatch(dev, apdev):
    """DPP and PKEX with mismatching code"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # First attempt with the wrong code must be detected by the responder
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=unknown" % id1
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("Failure not reported")
    if "possible PKEX code mismatch" not in ev:
        raise Exception("Unexpected result: " + ev)
    dev[0].dump_monitor()
    dev[1].dump_monitor()
    # Retry with the correct code must then complete successfully
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=secret" % id1
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator, retry)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator, retry)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder, retry)")
def test_dpp_pkex_code_mismatch_limit(dev, apdev):
    """DPP and PKEX with mismatching code limit"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # Repeated wrong-code attempts; the responder is expected to report a
    # PKEX t limit event after enough mismatches
    for i in range(5):
        dev[0].dump_monitor()
        dev[1].dump_monitor()
        cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=unknown" % id1
        res = dev[1].request(cmd)
        if "FAIL" in res:
            raise Exception("Failed to set PKEX data (initiator)")
        ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
        if ev is None:
            raise Exception("Failure not reported")
        if "possible PKEX code mismatch" not in ev:
            raise Exception("Unexpected result: " + ev)
    ev = dev[0].wait_event(["DPP-PKEX-T-LIMIT"], timeout=1)
    if ev is None:
        raise Exception("PKEX t limit not reported")
def test_dpp_pkex_curve_mismatch(dev, apdev):
    """DPP and PKEX with mismatching curve"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Responder uses P-256, initiator P-384, so the group negotiation must
    # fail on both ends (19/20 in the expected messages are the group IDs)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex curve=P-256"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex curve=P-384"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=secret" % id1
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("Failure not reported (dev 0)")
    if "Mismatching PKEX curve: peer=20 own=19" not in ev:
        raise Exception("Unexpected result: " + ev)
    ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("Failure not reported (dev 1)")
    if "Peer indicated mismatching PKEX group - proposed 19" not in ev:
        raise Exception("Unexpected result: " + ev)
def test_dpp_pkex_config2(dev, apdev):
    """DPP and PKEX with responder as the configurator"""
    check_dpp_capab(dev[0])
    # dev[0] (the PKEX responder in run_dpp_pkex2) acts as the configurator
    res = dev[0].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    # Configuration to provide once the exchange completes
    dev[0].set("dpp_configurator_params",
               " conf=sta-dpp configurator=%d" % conf_id)
    run_dpp_pkex2(dev, apdev)
def run_dpp_pkex2(dev, apdev, curve=None, init_extra=""):
    # PKEX exchange where the responder (dev[0]) is the configurator and
    # the initiator (dev[1]) the enrollee; configuration exchange is
    # verified at the end. Caller is expected to have set
    # dpp_configurator_params on dev[0].
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    if curve:
        cmd += " curve=" + curve
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    if curve:
        cmd += " curve=" + curve
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437 role=configurator"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 role=enrollee %s code=secret" % (id1, init_extra)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[1].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
def test_dpp_pkex_no_responder(dev, apdev):
    """DPP and PKEX with no responder (retry behavior)"""
    check_dpp_capab(dev[0])
    # Initiate PKEX without any responder present; the initiator should
    # eventually give up and report a failure
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    own_id = int(res)
    res = dev[0].request(
        "DPP_PKEX_ADD own=%d init=1 identifier=test code=secret" % (own_id))
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=15)
    if ev is None:
        raise Exception("DPP PKEX failure not reported")
    if "No response from PKEX peer" not in ev:
        raise Exception("Unexpected failure reason: " + ev)
def test_dpp_pkex_after_retry(dev, apdev):
    """DPP and PKEX completing after retry"""
    check_dpp_capab(dev[0])
    # Start the initiator before any responder exists
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_PKEX_ADD own=%d init=1 identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
    # Let the first attempt go out unanswered before bringing up the
    # responder; the exchange should then complete on a retry
    time.sleep(0.1)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to start listen operation")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=10)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    # Ignore Enrollee result since configurator was not set here
def test_dpp_pkex_hostapd_responder(dev, apdev):
    """DPP PKEX with hostapd as responder"""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "6" })
    check_dpp_capab(hapd)
    # Responder side (hostapd): bootstrap info + PKEX code
    res = hapd.request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info (hostapd)")
    id_h = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id_h)
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder/hostapd)")
    # Initiator side (wpa_supplicant) also acts as the configurator
    res = dev[0].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info (wpa_supplicant)")
    id0 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 conf=ap-dpp configurator=%d code=secret" % (id0, conf_id)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator/wpa_supplicant)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_pkex_hostapd_initiator(dev, apdev):
    """DPP PKEX with hostapd as initiator"""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "6" })
    check_dpp_capab(hapd)
    # Responder side (wpa_supplicant) acts as the configurator
    res = dev[0].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info (wpa_supplicant)")
    id0 = int(res)
    dev[0].set("dpp_configurator_params",
               " conf=ap-dpp configurator=%d" % conf_id)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder/wpa_supplicant)")
    cmd = "DPP_LISTEN 2437 role=configurator"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # Initiator side (hostapd) takes the enrollee role
    res = hapd.request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info (hostapd)")
    id_h = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 role=enrollee code=secret" % (id_h)
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator/hostapd)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_hostapd_configurator(dev, apdev):
    """DPP with hostapd as configurator/initiator"""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "1" })
    check_dpp_capab(hapd)
    res = hapd.request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = hapd.request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    # Verify the bootstrap entry hostapd created from the QR Code. Note:
    # use id1 (hostapd's identifier for the parsed URI); id0 is valid only
    # on dev[0] and matched here previously just because both start at 1.
    res = hapd.request("DPP_BOOTSTRAP_INFO %d" % id1)
    if "FAIL" in res:
        raise Exception("DPP_BOOTSTRAP_INFO failed")
    if "type=QRCODE" not in res:
        raise Exception("DPP_BOOTSTRAP_INFO did not report correct type")
    if "mac_addr=" + dev[0].own_addr() not in res:
        raise Exception("DPP_BOOTSTRAP_INFO did not report correct mac_addr")
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d configurator=%d conf=sta-dpp" % (id1, conf_id)
    if "OK" not in hapd.request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = hapd.wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_hostapd_configurator_responder(dev, apdev):
    """DPP with hostapd as configurator/responder"""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured",
                                      "channel": "1" })
    check_dpp_capab(hapd)
    res = hapd.request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    # Configuration hostapd applies when the initiator enrolls
    hapd.set("dpp_configurator_params",
             " conf=sta-dpp configurator=%d" % conf_id)
    addr = hapd.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[0].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % (id1)
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = hapd.wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
def test_dpp_own_config(dev, apdev):
    """DPP configurator signing own connector"""
    try:
        run_dpp_own_config(dev, apdev)
    finally:
        # Restore the default so later tests are not affected
        dev[0].set("dpp_config_processing", "0")
def test_dpp_own_config_curve_mismatch(dev, apdev):
    """DPP configurator signing own connector using mismatching curve"""
    try:
        # BP-384 differs from the curve used for the AP config, so the
        # resulting connection attempt is expected to fail
        run_dpp_own_config(dev, apdev, own_curve="BP-384", expect_failure=True)
    finally:
        # Restore the default so later tests are not affected
        dev[0].set("dpp_config_processing", "0")
def run_dpp_own_config(dev, apdev, own_curve=None, expect_failure=False):
    """Provision the AP over DPP and have the station configurator sign a
    connector for its own use.

    own_curve optionally selects a different curve for the
    self-configuration; expect_failure indicates that the following
    connection attempt should not succeed.
    """
    check_dpp_capab(dev[0], own_curve and "BP" in own_curve)
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
    check_dpp_capab(hapd)
    addr = hapd.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id_h = int(res)
    uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id_h)
    cmd = "DPP_CONFIGURATOR_ADD"
    res = dev[0].request(cmd);
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    res = dev[0].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id = int(res)
    # dev[0] (configurator) initiates and provisions the AP
    cmd = "DPP_AUTH_INIT peer=%d conf=ap-dpp configurator=%d" % (id, conf_id)
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = hapd.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    update_hapd_config(hapd)
    dev[0].set("dpp_config_processing", "1")
    # Configurator signs a connector for itself
    cmd = "DPP_CONFIGURATOR_SIGN conf=sta-dpp configurator=%d" % (conf_id)
    if own_curve:
        cmd += " curve=" + own_curve
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate own configuration")
    ev = dev[0].wait_event(["DPP-NETWORK-ID"], timeout=1)
    if ev is None:
        raise Exception("DPP network profile not generated")
    id = ev.split(' ')[1]
    dev[0].select_network(id, freq="2412")
    if expect_failure:
        ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
        if ev is not None:
            raise Exception("Unexpected connection");
        dev[0].request("DISCONNECT")
    else:
        dev[0].wait_connected()
def test_dpp_own_config_ap(dev, apdev):
    """DPP configurator (AP) signing own connector"""
    try:
        run_dpp_own_config_ap(dev, apdev)
    finally:
        # Restore the default so later tests are not affected
        dev[0].set("dpp_config_processing", "0")
def run_dpp_own_config_ap(dev, apdev):
    """AP acts as configurator, signs a connector for its own use, and then
    provisions the station over DPP."""
    check_dpp_capab(dev[0])
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
    check_dpp_capab(hapd)
    cmd = "DPP_CONFIGURATOR_ADD"
    res = hapd.request(cmd);
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    # AP signs its own configuration before provisioning the station
    cmd = "DPP_CONFIGURATOR_SIGN conf=ap-dpp configurator=%d" % (conf_id)
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate own configuration")
    update_hapd_config(hapd)
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id = int(res)
    uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id)
    res = hapd.request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id = int(res)
    dev[0].set("dpp_config_processing", "2")
    if "OK" not in dev[0].request("DPP_LISTEN 2412"):
        raise Exception("Failed to start listen operation")
    # AP initiates as configurator towards the listening station
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id, conf_id)
    if "OK" not in hapd.request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = hapd.wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = hapd.wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED", "DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    if "DPP-CONF-RECEIVED" not in ev:
        raise Exception("DPP configuration failed (Enrollee)")
    dev[0].wait_connected()
def test_dpp_intro_mismatch(dev, apdev):
    """DPP network introduction mismatch cases"""
    wpas = None
    try:
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add("wlan5")
        check_dpp_capab(wpas)
        run_dpp_intro_mismatch(dev, apdev, wpas)
    finally:
        # Restore config processing defaults on every station used
        for sta in [dev[0], dev[2]]:
            sta.set("dpp_config_processing", "0")
        if wpas:
            wpas.set("dpp_config_processing", "0")
def run_dpp_intro_mismatch(dev, apdev, wpas):
    """Provision an AP and three stations with intentionally mismatching
    DPP Connectors and verify the per-station network introduction
    (DPP-INTRO) status codes.

    STA0 gets a mismatching groupId, STA2 a mismatching C-sign-key (from a
    second configurator), and STA5 (wpas) a netAccessKey on a different EC
    group (P-521 bootstrapping key).
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    check_dpp_capab(dev[2])
    logger.info("Start AP in unconfigured state")
    hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
    check_dpp_capab(hapd)
    addr = hapd.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = hapd.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id_h = int(res)
    uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id_h)
    logger.info("Provision AP with DPP configuration")
    res = dev[1].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    res = dev[1].request("DPP_QR_CODE " + uri)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id = int(res)
    # AP connector uses groupId "a"
    dev[1].set("dpp_groups_override", '[{"groupId":"a","netRole":"ap"}]')
    cmd = "DPP_AUTH_INIT peer=%d conf=ap-dpp configurator=%d" % (id, conf_id)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    update_hapd_config(hapd)
    logger.info("Provision STA0 with DPP Connector that has mismatching groupId")
    dev[0].set("dpp_config_processing", "2")
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    # STA0 connector uses groupId "b", which does not match the AP's "a"
    dev[1].set("dpp_groups_override", '[{"groupId":"b","netRole":"sta"}]')
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator for STA0)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee STA0)")
    logger.info("Provision STA2 with DPP Connector that has mismatching C-sign-key")
    dev[2].set("dpp_config_processing", "2")
    addr = dev[2].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    res = dev[2].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id2 = int(res)
    uri2 = dev[2].request("DPP_BOOTSTRAP_GET_URI %d" % id2)
    res = dev[1].request("DPP_QR_CODE " + uri2)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in dev[2].request(cmd):
        raise Exception("Failed to start listen operation")
    # Second configurator instance gives STA2 a different C-sign-key
    res = dev[1].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id_2 = int(res)
    dev[1].set("dpp_groups_override", '')
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id_2)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Configurator for STA2)")
    ev = dev[2].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee STA2)")
    logger.info("Provision STA5 with DPP Connector that has mismatching netAccessKey EC group")
    wpas.set("dpp_config_processing", "2")
    addr = wpas.own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
    cmd += " curve=P-521"
    res = wpas.request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id5 = int(res)
    uri5 = wpas.request("DPP_BOOTSTRAP_GET_URI %d" % id5)
    res = dev[1].request("DPP_QR_CODE " + uri5)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    cmd = "DPP_LISTEN 2412"
    if "OK" not in wpas.request(cmd):
        raise Exception("Failed to start listen operation")
    dev[1].set("dpp_groups_override", '')
    cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id)
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
    if ev is None:
        # Fix: this step provisions STA5, not STA0
        raise Exception("DPP configuration not completed (Configurator for STA5)")
    ev = wpas.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee STA5)")
    logger.info("Verify network introduction results")
    ev = dev[0].wait_event(["DPP-INTRO"], timeout=10)
    if ev is None:
        raise Exception("DPP network introduction result not seen on STA0")
    if "status=8" not in ev:
        raise Exception("Unexpected network introduction result on STA0: " + ev)
    ev = dev[2].wait_event(["DPP-INTRO"], timeout=5)
    if ev is None:
        raise Exception("DPP network introduction result not seen on STA2")
    if "status=8" not in ev:
        raise Exception("Unexpected network introduction result on STA2: " + ev)
    ev = wpas.wait_event(["DPP-INTRO"], timeout=10)
    if ev is None:
        raise Exception("DPP network introduction result not seen on STA5")
    if "status=7" not in ev:
        raise Exception("Unexpected network introduction result on STA5: " + ev)
def run_dpp_proto_init(dev, test_dev, test, mutual=False, unicast=True,
                       listen=True, chan="81/1", init_enrollee=False):
    """Start a DPP exchange with a dpp_test protocol-testing value set.

    dev[0] is the responder and dev[1] the initiator; dev[test_dev] gets
    dpp_test=test to trigger the protocol-testing behavior. With
    init_enrollee=True, the initiator takes the enrollee role and dev[0]
    provides the configurator parameters; otherwise dev[1] acts as
    configurator. mutual enables two-QR-code (mutual) authentication,
    unicast includes the MAC address in the bootstrap info, listen
    controls whether dev[0] starts a listen operation, and chan selects
    the bootstrap channel list (None to omit it).
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    dev[test_dev].set("dpp_test", str(test))
    cmd = "DPP_CONFIGURATOR_ADD"
    if init_enrollee:
        res = dev[0].request(cmd)
    else:
        res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)
    addr = dev[0].own_addr().replace(':', '')
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode"
    if chan:
        cmd += " chan=" + chan
    if unicast:
        cmd += " mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    if mutual:
        # Responder scans the initiator's QR code as well
        addr = dev[1].own_addr().replace(':', '')
        res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
        if "FAIL" in res:
            raise Exception("Failed to generate bootstrapping info")
        id1b = int(res)
        uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
        res = dev[0].request("DPP_QR_CODE " + uri1b)
        if "FAIL" in res:
            raise Exception("Failed to parse QR Code URI")
        id0b = int(res)
        cmd = "DPP_LISTEN 2412 qr=mutual"
    else:
        cmd = "DPP_LISTEN 2412"
    if init_enrollee:
        # Responder acts as configurator when the initiator is the enrollee
        cmd += " role=configurator"
        dev[0].set("dpp_configurator_params",
                   " conf=sta-dpp configurator=%d" % conf_id);
    if listen:
        if "OK" not in dev[0].request(cmd):
            raise Exception("Failed to start listen operation")
    if init_enrollee:
        cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % (id1)
    else:
        cmd = "DPP_AUTH_INIT peer=%d configurator=%d conf=sta-dpp" % (id1, conf_id)
    if mutual:
        cmd += " own=%d" % id1b
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
def test_dpp_proto_after_wrapped_data_auth_req(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in Auth Req"""
    run_dpp_proto_init(dev, 1, 1)
    rx = dev[0].wait_event(["DPP-RX"], timeout=5)
    if rx is None:
        raise Exception("DPP Authentication Request not seen")
    if "type=0" not in rx or "ignore=invalid-attributes" not in rx:
        raise Exception("Unexpected RX info: " + rx)
    # Responder ignored the frame, so nothing should come back
    if dev[1].wait_event(["DPP-RX"], timeout=0.1) is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_auth_req_stop_after_ack(dev, apdev):
    """DPP initiator stopping after ACK, but no response"""
    run_dpp_proto_init(dev, 1, 1, listen=True)
    if dev[1].wait_event(["DPP-AUTH-INIT-FAILED"], timeout=5) is None:
        raise Exception("Authentication failure not reported")
def test_dpp_auth_req_retries(dev, apdev):
    """DPP initiator retries with no ACK"""
    check_dpp_capab(dev[1])
    # Three attempts, 1000 ms apart, with a short 100 ms response wait
    dev[1].set("dpp_init_max_tries", "3")
    dev[1].set("dpp_init_retry_time", "1000")
    dev[1].set("dpp_resp_wait_time", "100")
    run_dpp_proto_init(dev, 1, 1, unicast=False, listen=False)
    for attempt in range(3):
        if dev[1].wait_event(["DPP-TX "], timeout=5) is None:
            raise Exception("Auth Req not sent (%d)" % attempt)
    if dev[1].wait_event(["DPP-AUTH-INIT-FAILED"], timeout=5) is None:
        raise Exception("Authentication failure not reported")
def test_dpp_auth_req_retries_multi_chan(dev, apdev):
    """DPP initiator retries with no ACK and multiple channels"""
    check_dpp_capab(dev[1])
    # Three attempts over three channels => nine transmissions in total
    dev[1].set("dpp_init_max_tries", "3")
    dev[1].set("dpp_init_retry_time", "1000")
    dev[1].set("dpp_resp_wait_time", "100")
    run_dpp_proto_init(dev, 1, 1, unicast=False, listen=False,
                       chan="81/1,81/6,81/11")
    for attempt in range(3 * 3):
        if dev[1].wait_event(["DPP-TX "], timeout=5) is None:
            raise Exception("Auth Req not sent (%d)" % attempt)
    if dev[1].wait_event(["DPP-AUTH-INIT-FAILED"], timeout=5) is None:
        raise Exception("Authentication failure not reported")
def test_dpp_proto_after_wrapped_data_auth_resp(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in Auth Resp"""
    run_dpp_proto_init(dev, 0, 2)
    # Initiator flags the invalid Auth Resp (type=1) and ignores it
    ev = dev[1].wait_event(["DPP-RX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Response not seen")
    if "type=1" not in ev or "ignore=invalid-attributes" not in ev:
        raise Exception("Unexpected RX info: " + ev)
    ev = dev[0].wait_event(["DPP-RX"], timeout=1)
    if ev is None or "type=0" not in ev:
        raise Exception("DPP Authentication Request not seen")
    # No Auth Conf should follow the ignored response
    ev = dev[0].wait_event(["DPP-RX"], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_proto_after_wrapped_data_auth_conf(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in Auth Conf"""
    run_dpp_proto_init(dev, 1, 3)
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None or "type=0" not in ev:
        raise Exception("DPP Authentication Request not seen")
    # Auth Conf (type=2) is flagged due to the extra attribute
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None:
        raise Exception("DPP Authentication Confirm not seen")
    if "type=2" not in ev or "ignore=invalid-attributes" not in ev:
        raise Exception("Unexpected RX info: " + ev)
def test_dpp_proto_after_wrapped_data_conf_req(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in Conf Req"""
    run_dpp_proto_init(dev, 0, 6)
    if dev[1].wait_event(["DPP-CONF-FAILED"], timeout=10) is None:
        raise Exception("DPP Configuration failure not seen")
def test_dpp_proto_after_wrapped_data_conf_resp(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in Conf Resp"""
    run_dpp_proto_init(dev, 1, 7)
    if dev[0].wait_event(["DPP-CONF-FAILED"], timeout=10) is None:
        raise Exception("DPP Configuration failure not seen")
def test_dpp_proto_zero_i_capab(dev, apdev):
    """DPP protocol testing - zero I-capability in Auth Req"""
    run_dpp_proto_init(dev, 1, 8)
    fail = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if "Invalid role in I-capabilities 0x00" not in fail:
        raise Exception("Unexpected failure: " + fail)
    # Responder must not reply to the invalid request
    if dev[1].wait_event(["DPP-RX"], timeout=0.1) is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_proto_zero_r_capab(dev, apdev):
    """DPP protocol testing - zero R-capability in Auth Resp"""
    run_dpp_proto_init(dev, 0, 9)
    fail = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if "Unexpected role in R-capabilities 0x00" not in fail:
        raise Exception("Unexpected failure: " + fail)
    rx = dev[0].wait_event(["DPP-RX"], timeout=1)
    if rx is None or "type=0" not in rx:
        raise Exception("DPP Authentication Request not seen")
    # No Auth Conf should arrive after the rejected response
    if dev[0].wait_event(["DPP-RX"], timeout=0.1) is not None:
        raise Exception("Unexpected DPP message seen")
def run_dpp_proto_auth_req_missing(dev, test, reason, mutual=False):
    """Run a protocol test with a mangled Auth Req and verify that the
    responder reports the expected failure reason."""
    run_dpp_proto_init(dev, 1, test, mutual=mutual)
    fail = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if reason not in fail:
        raise Exception("Unexpected failure: " + fail)
    # The mangled request must not trigger any response
    if dev[1].wait_event(["DPP-RX"], timeout=0.1) is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_proto_auth_req_no_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - no R-bootstrap key in Auth Req"""
    reason = "Missing or invalid required Responder Bootstrapping Key Hash attribute"
    run_dpp_proto_auth_req_missing(dev, 10, reason)
def test_dpp_proto_auth_req_invalid_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid R-bootstrap key in Auth Req"""
    reason = "No matching own bootstrapping key found - ignore message"
    run_dpp_proto_auth_req_missing(dev, 68, reason)
def test_dpp_proto_auth_req_no_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - no I-bootstrap key in Auth Req"""
    reason = "Missing or invalid required Initiator Bootstrapping Key Hash attribute"
    run_dpp_proto_auth_req_missing(dev, 11, reason)
def test_dpp_proto_auth_req_invalid_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid I-bootstrap key in Auth Req"""
    run_dpp_proto_init(dev, 1, 69, mutual=True)
    # Responder cannot match the I-bootstrap hash and asks for the peer QR code
    ev = dev[0].wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("DPP scan request not seen")
    ev = dev[1].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        # Fix: "indivation" -> "indication" in the error message
        raise Exception("DPP response pending indication not seen")
def test_dpp_proto_auth_req_no_i_proto_key(dev, apdev):
    """DPP protocol testing - no I-proto key in Auth Req"""
    reason = "Missing required Initiator Protocol Key attribute"
    run_dpp_proto_auth_req_missing(dev, 12, reason)
def test_dpp_proto_auth_req_invalid_i_proto_key(dev, apdev):
    """DPP protocol testing - invalid I-proto key in Auth Req"""
    reason = "Invalid Initiator Protocol Key"
    run_dpp_proto_auth_req_missing(dev, 66, reason)
def test_dpp_proto_auth_req_no_i_nonce(dev, apdev):
    """DPP protocol testing - no I-nonce in Auth Req"""
    reason = "Missing or invalid I-nonce"
    run_dpp_proto_auth_req_missing(dev, 13, reason)
def test_dpp_proto_auth_req_invalid_i_nonce(dev, apdev):
    """DPP protocol testing - invalid I-nonce in Auth Req"""
    reason = "Missing or invalid I-nonce"
    run_dpp_proto_auth_req_missing(dev, 81, reason)
def test_dpp_proto_auth_req_no_i_capab(dev, apdev):
    """DPP protocol testing - no I-capab in Auth Req"""
    reason = "Missing or invalid I-capab"
    run_dpp_proto_auth_req_missing(dev, 14, reason)
def test_dpp_proto_auth_req_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in Auth Req"""
    reason = "Missing or invalid required Wrapped Data attribute"
    run_dpp_proto_auth_req_missing(dev, 15, reason)
def run_dpp_proto_auth_resp_missing(dev, test, reason):
    """Run a protocol test with a mangled Auth Resp; reason=None means no
    failure event is expected and the exchange is just given time to run."""
    run_dpp_proto_init(dev, 0, test, mutual=True)
    if reason is None:
        time.sleep(0.1)
        return
    fail = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if reason not in fail:
        raise Exception("Unexpected failure: " + fail)
    rx = dev[0].wait_event(["DPP-RX"], timeout=1)
    if rx is None or "type=0" not in rx:
        raise Exception("DPP Authentication Request not seen")
    # No Auth Conf should follow the rejected response
    if dev[0].wait_event(["DPP-RX"], timeout=0.1) is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_proto_auth_resp_no_status(dev, apdev):
    """DPP protocol testing - no Status in Auth Resp"""
    reason = "Missing or invalid required DPP Status attribute"
    run_dpp_proto_auth_resp_missing(dev, 16, reason)
def test_dpp_proto_auth_resp_invalid_status(dev, apdev):
    """DPP protocol testing - invalid Status in Auth Resp"""
    reason = "Responder reported failure"
    run_dpp_proto_auth_resp_missing(dev, 74, reason)
def test_dpp_proto_auth_resp_no_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - no R-bootstrap key in Auth Resp"""
    reason = "Missing or invalid required Responder Bootstrapping Key Hash attribute"
    run_dpp_proto_auth_resp_missing(dev, 17, reason)
def test_dpp_proto_auth_resp_invalid_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid R-bootstrap key in Auth Resp"""
    reason = "Unexpected Responder Bootstrapping Key Hash value"
    run_dpp_proto_auth_resp_missing(dev, 70, reason)
def test_dpp_proto_auth_resp_no_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - no I-bootstrap key in Auth Resp"""
    # No failure reason expected for this case
    run_dpp_proto_auth_resp_missing(dev, 18, None)
def test_dpp_proto_auth_resp_invalid_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid I-bootstrap key in Auth Resp"""
    reason = "Initiator Bootstrapping Key Hash attribute did not match"
    run_dpp_proto_auth_resp_missing(dev, 71, reason)
def test_dpp_proto_auth_resp_no_r_proto_key(dev, apdev):
    """DPP protocol testing - no R-Proto Key in Auth Resp"""
    reason = "Missing required Responder Protocol Key attribute"
    run_dpp_proto_auth_resp_missing(dev, 19, reason)
def test_dpp_proto_auth_resp_invalid_r_proto_key(dev, apdev):
    """DPP protocol testing - invalid R-Proto Key in Auth Resp"""
    reason = "Invalid Responder Protocol Key"
    run_dpp_proto_auth_resp_missing(dev, 67, reason)
def test_dpp_proto_auth_resp_no_r_nonce(dev, apdev):
    """DPP protocol testing - no R-nonce in Auth Resp"""
    reason = "Missing or invalid R-nonce"
    run_dpp_proto_auth_resp_missing(dev, 20, reason)
def test_dpp_proto_auth_resp_no_i_nonce(dev, apdev):
    """DPP protocol testing - no I-nonce in Auth Resp"""
    reason = "Missing or invalid I-nonce"
    run_dpp_proto_auth_resp_missing(dev, 21, reason)
def test_dpp_proto_auth_resp_no_r_capab(dev, apdev):
    """DPP protocol testing - no R-capab in Auth Resp"""
    reason = "Missing or invalid R-capabilities"
    run_dpp_proto_auth_resp_missing(dev, 22, reason)
def test_dpp_proto_auth_resp_no_r_auth(dev, apdev):
    """DPP protocol testing - no R-auth in Auth Resp"""
    reason = "Missing or invalid Secondary Wrapped Data"
    run_dpp_proto_auth_resp_missing(dev, 23, reason)
def test_dpp_proto_auth_resp_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in Auth Resp"""
    reason = "Missing or invalid required Wrapped Data attribute"
    run_dpp_proto_auth_resp_missing(dev, 24, reason)
def test_dpp_proto_auth_resp_i_nonce_mismatch(dev, apdev):
    """DPP protocol testing - I-nonce mismatch in Auth Resp"""
    run_dpp_proto_init(dev, 0, 30, mutual=True)
    # Initiator detects that the echoed I-nonce does not match
    ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "I-nonce mismatch" not in ev:
        raise Exception("Unexpected failure: " + ev)
    ev = dev[0].wait_event(["DPP-RX"], timeout=1)
    if ev is None or "type=0" not in ev:
        raise Exception("DPP Authentication Request not seen")
    # No Auth Conf should follow the rejected response
    ev = dev[0].wait_event(["DPP-RX"], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected DPP message seen")
def test_dpp_proto_auth_resp_incompatible_r_capab(dev, apdev):
    """DPP protocol testing - Incompatible R-capab in Auth Resp"""
    run_dpp_proto_init(dev, 0, 31, mutual=True)
    # Initiator rejects the advertised role ...
    ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Unexpected role in R-capabilities 0x02" not in ev:
        raise Exception("Unexpected failure: " + ev)
    # ... and the responder learns of the incompatibility
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Peer reported incompatible R-capab role" not in ev:
        raise Exception("Unexpected failure: " + ev)
def test_dpp_proto_auth_resp_r_auth_mismatch(dev, apdev):
    """DPP protocol testing - R-auth mismatch in Auth Resp"""
    run_dpp_proto_init(dev, 0, 32, mutual=True)
    # Initiator detects the invalid Responder Authenticating Tag ...
    ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Mismatching Responder Authenticating Tag" not in ev:
        raise Exception("Unexpected failure: " + ev)
    # ... and the responder is told that authentication failed
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Peer reported authentication failure" not in ev:
        raise Exception("Unexpected failure: " + ev)
def run_dpp_proto_auth_conf_missing(dev, test, reason):
    """Run a protocol test with a mangled Auth Conf; reason=None means no
    failure event is expected and the exchange is just given time to run."""
    run_dpp_proto_init(dev, 1, test, mutual=True)
    if reason is None:
        time.sleep(0.1)
        return
    fail = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if reason not in fail:
        raise Exception("Unexpected failure: " + fail)
def test_dpp_proto_auth_conf_no_status(dev, apdev):
    """DPP protocol testing - no Status in Auth Conf"""
    reason = "Missing or invalid required DPP Status attribute"
    run_dpp_proto_auth_conf_missing(dev, 25, reason)
def test_dpp_proto_auth_conf_invalid_status(dev, apdev):
    """DPP protocol testing - invalid Status in Auth Conf"""
    reason = "Authentication failed"
    run_dpp_proto_auth_conf_missing(dev, 75, reason)
def test_dpp_proto_auth_conf_no_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - no R-bootstrap key in Auth Conf"""
    reason = "Missing or invalid required Responder Bootstrapping Key Hash attribute"
    run_dpp_proto_auth_conf_missing(dev, 26, reason)
def test_dpp_proto_auth_conf_invalid_r_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid R-bootstrap key in Auth Conf"""
    reason = "Responder Bootstrapping Key Hash mismatch"
    run_dpp_proto_auth_conf_missing(dev, 72, reason)
def test_dpp_proto_auth_conf_no_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - no I-bootstrap key in Auth Conf"""
    reason = "Missing Initiator Bootstrapping Key Hash attribute"
    run_dpp_proto_auth_conf_missing(dev, 27, reason)
def test_dpp_proto_auth_conf_invalid_i_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid I-bootstrap key in Auth Conf"""
    reason = "Initiator Bootstrapping Key Hash mismatch"
    run_dpp_proto_auth_conf_missing(dev, 73, reason)
def test_dpp_proto_auth_conf_no_i_auth(dev, apdev):
    """DPP protocol testing - no I-Auth in Auth Conf"""
    reason = "Missing or invalid Initiator Authenticating Tag"
    run_dpp_proto_auth_conf_missing(dev, 28, reason)
def test_dpp_proto_auth_conf_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in Auth Conf"""
    reason = "Missing or invalid required Wrapped Data attribute"
    run_dpp_proto_auth_conf_missing(dev, 29, reason)
def test_dpp_proto_auth_conf_i_auth_mismatch(dev, apdev):
    """DPP protocol testing - I-auth mismatch in Auth Conf"""
    run_dpp_proto_init(dev, 1, 33, mutual=True)
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Mismatching Initiator Authenticating Tag" not in ev:
        # Fix: was "raise Excception", which would raise NameError
        raise Exception("Unexpected failure: " + ev)
def test_dpp_proto_auth_conf_replaced_by_resp(dev, apdev):
    """DPP protocol testing - Auth Conf replaced by Resp"""
    run_dpp_proto_init(dev, 1, 65, mutual=True)
    ev = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if ev is None:
        raise Exception("DPP failure not seen")
    if "Unexpected Authentication Response" not in ev:
        # Fix: was "raise Excception", which would raise NameError
        raise Exception("Unexpected failure: " + ev)
def run_dpp_proto_conf_req_missing(dev, test, reason):
    """Run a protocol test with a mangled Conf Req and verify that the
    configurator reports the expected failure reason."""
    run_dpp_proto_init(dev, 0, test)
    fail = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if reason not in fail:
        raise Exception("Unexpected failure: " + fail)
def test_dpp_proto_conf_req_no_e_nonce(dev, apdev):
    """DPP protocol testing - no E-nonce in Conf Req"""
    reason = "Missing or invalid Enrollee Nonce attribute"
    run_dpp_proto_conf_req_missing(dev, 51, reason)
def test_dpp_proto_conf_req_invalid_e_nonce(dev, apdev):
    """DPP protocol testing - invalid E-nonce in Conf Req"""
    reason = "Missing or invalid Enrollee Nonce attribute"
    run_dpp_proto_conf_req_missing(dev, 83, reason)
def test_dpp_proto_conf_req_no_config_attr_obj(dev, apdev):
    """DPP protocol testing - no Config Attr Obj in Conf Req"""
    reason = "Missing or invalid Config Attributes attribute"
    run_dpp_proto_conf_req_missing(dev, 52, reason)
def test_dpp_proto_conf_req_invalid_config_attr_obj(dev, apdev):
    """DPP protocol testing - invalid Config Attr Obj in Conf Req"""
    reason = "Unsupported wi-fi_tech"
    run_dpp_proto_conf_req_missing(dev, 76, reason)
def test_dpp_proto_conf_req_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in Conf Req"""
    reason = "Missing or invalid required Wrapped Data attribute"
    run_dpp_proto_conf_req_missing(dev, 53, reason)
def run_dpp_proto_conf_resp_missing(dev, test, reason):
    """Run a protocol test with a mangled Conf Resp and verify that the
    enrollee reports the expected failure reason."""
    run_dpp_proto_init(dev, 1, test)
    fail = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if fail is None:
        raise Exception("DPP failure not seen")
    if reason not in fail:
        raise Exception("Unexpected failure: " + fail)
def test_dpp_proto_conf_resp_no_e_nonce(dev, apdev):
    """DPP protocol testing - no E-nonce in Conf Resp"""
    reason = "Missing or invalid Enrollee Nonce attribute"
    run_dpp_proto_conf_resp_missing(dev, 54, reason)
def test_dpp_proto_conf_resp_no_config_obj(dev, apdev):
    """DPP protocol testing - no Config Object in Conf Resp"""
    reason = "Missing required Configuration Object attribute"
    run_dpp_proto_conf_resp_missing(dev, 55, reason)
def test_dpp_proto_conf_resp_no_status(dev, apdev):
    """DPP protocol testing - no Status in Conf Resp"""
    reason = "Missing or invalid required DPP Status attribute"
    run_dpp_proto_conf_resp_missing(dev, 56, reason)
def test_dpp_proto_conf_resp_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in Conf Resp"""
    reason = "Missing or invalid required Wrapped Data attribute"
    run_dpp_proto_conf_resp_missing(dev, 57, reason)
def test_dpp_proto_conf_resp_invalid_status(dev, apdev):
    """DPP protocol testing - invalid Status in Conf Resp"""
    reason = "Configurator rejected configuration"
    run_dpp_proto_conf_resp_missing(dev, 58, reason)
def test_dpp_proto_conf_resp_e_nonce_mismatch(dev, apdev):
    """DPP protocol testing - E-nonce mismatch in Conf Resp"""
    reason = "Enrollee Nonce mismatch"
    run_dpp_proto_conf_resp_missing(dev, 59, reason)
def test_dpp_proto_stop_at_auth_req(dev, apdev):
    """DPP protocol testing - stop when receiving Auth Req"""
    run_dpp_proto_init(dev, 0, 87)
    if dev[1].wait_event(["DPP-AUTH-INIT-FAILED"], timeout=5) is None:
        raise Exception("Authentication init failure not reported")
def test_dpp_proto_stop_at_auth_resp(dev, apdev):
    """DPP protocol testing - stop when receiving Auth Resp"""
    run_dpp_proto_init(dev, 1, 88)
    ev = dev[1].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("Auth Req TX not seen")
    ev = dev[0].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("Auth Resp TX not seen")
    # Initiator stops at the Auth Resp, so no Auth Conf goes out
    ev = dev[1].wait_event(["DPP-TX "], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected Auth Conf TX")
def test_dpp_proto_stop_at_auth_conf(dev, apdev):
    """DPP protocol testing - stop when receiving Auth Conf"""
    run_dpp_proto_init(dev, 0, 89, init_enrollee=True)
    # Enrollee (dev[1]) proceeds to the GAS config query, which times out
    ev = dev[1].wait_event(["GAS-QUERY-START"], timeout=10)
    if ev is None:
        raise Exception("Enrollee did not start GAS")
    ev = dev[1].wait_event(["GAS-QUERY-DONE"], timeout=10)
    if ev is None:
        raise Exception("Enrollee did not time out GAS")
    if "result=TIMEOUT" not in ev:
        raise Exception("Unexpected GAS result: " + ev)
def test_dpp_proto_stop_at_conf_req(dev, apdev):
    """DPP protocol testing - stop when receiving Conf Req"""
    run_dpp_proto_init(dev, 1, 90)
    # Enrollee (dev[0]) proceeds to the GAS config query, which times out
    ev = dev[0].wait_event(["GAS-QUERY-START"], timeout=10)
    if ev is None:
        raise Exception("Enrollee did not start GAS")
    ev = dev[0].wait_event(["GAS-QUERY-DONE"], timeout=10)
    if ev is None:
        raise Exception("Enrollee did not time out GAS")
    if "result=TIMEOUT" not in ev:
        raise Exception("Unexpected GAS result: " + ev)
def run_dpp_proto_init_pkex(dev, test_dev, test):
    """Start a PKEX-based DPP exchange with dpp_test set on dev[test_dev].

    dev[0] is the PKEX responder (listening on 2437) and dev[1] the
    initiator; both sides use identifier "test" and code "secret".
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    dev[test_dev].set("dpp_test", str(test))
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    cmd = "DPP_BOOTSTRAP_GEN type=pkex"
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)
    cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id0)
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (responder)")
    cmd = "DPP_LISTEN 2437"
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 code=secret" % id1
    res = dev[1].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to set PKEX data (initiator)")
def test_dpp_proto_after_wrapped_data_pkex_cr_req(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in PKEX CR Req"""
    # Initiator (dev[1]) sends a CR Req carrying an extra attribute after
    # Wrapped Data; the responder should mark it as invalid attributes.
    run_dpp_proto_init_pkex(dev, 1, 4)
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None or "type=7" not in ev:
        raise Exception("PKEX Exchange Request not seen")
    ev = dev[0].wait_event(["DPP-RX"], timeout=5)
    if ev is None or "type=9" not in ev:
        raise Exception("PKEX Commit-Reveal Request not seen")
    if "ignore=invalid-attributes" not in ev:
        raise Exception("Unexpected RX info: " + ev)
def test_dpp_proto_after_wrapped_data_pkex_cr_resp(dev, apdev):
    """DPP protocol testing - attribute after Wrapped Data in PKEX CR Resp"""
    # Responder (dev[0]) sends a CR Resp carrying an extra attribute after
    # Wrapped Data; the initiator should mark it as invalid attributes.
    run_dpp_proto_init_pkex(dev, 0, 5)
    ev = dev[1].wait_event(["DPP-RX"], timeout=5)
    if ev is None or "type=8" not in ev:
        raise Exception("PKEX Exchange Response not seen")
    ev = dev[1].wait_event(["DPP-RX"], timeout=5)
    if ev is None or "type=10" not in ev:
        raise Exception("PKEX Commit-Reveal Response not seen")
    if "ignore=invalid-attributes" not in ev:
        raise Exception("Unexpected RX info: " + ev)
def run_dpp_proto_pkex_req_missing(dev, test, reason):
    """Run PKEX with a broken initiator request; expect DPP-FAIL on dev[0].

    *reason* must appear in the responder's DPP-FAIL event text.
    """
    run_dpp_proto_init_pkex(dev, 1, test)
    failure = dev[0].wait_event(["DPP-FAIL"], timeout=5)
    if failure is None:
        raise Exception("DPP failure not seen")
    if reason not in failure:
        raise Exception("Unexpected failure: " + failure)
def run_dpp_proto_pkex_resp_missing(dev, test, reason):
    """Run PKEX with a broken responder reply; expect DPP-FAIL on dev[1].

    *reason* must appear in the initiator's DPP-FAIL event text.
    """
    run_dpp_proto_init_pkex(dev, 0, test)
    failure = dev[1].wait_event(["DPP-FAIL"], timeout=5)
    if failure is None:
        raise Exception("DPP failure not seen")
    if reason not in failure:
        raise Exception("Unexpected failure: " + failure)
# Each wrapper below runs PKEX with one dpp_test value that makes the peer
# omit a single attribute, and checks for the expected failure reason.
def test_dpp_proto_pkex_exchange_req_no_finite_cyclic_group(dev, apdev):
    """DPP protocol testing - no Finite Cyclic Group in PKEX Exchange Request"""
    run_dpp_proto_pkex_req_missing(dev, 34,
                                   "Missing or invalid Finite Cyclic Group attribute")

def test_dpp_proto_pkex_exchange_req_no_encrypted_key(dev, apdev):
    """DPP protocol testing - no Encrypted Key in PKEX Exchange Request"""
    run_dpp_proto_pkex_req_missing(dev, 35,
                                   "Missing Encrypted Key attribute")

def test_dpp_proto_pkex_exchange_resp_no_status(dev, apdev):
    """DPP protocol testing - no Status in PKEX Exchange Response"""
    run_dpp_proto_pkex_resp_missing(dev, 36, "No DPP Status attribute")

def test_dpp_proto_pkex_exchange_resp_no_encrypted_key(dev, apdev):
    """DPP protocol testing - no Encrypted Key in PKEX Exchange Response"""
    run_dpp_proto_pkex_resp_missing(dev, 37, "Missing Encrypted Key attribute")

def test_dpp_proto_pkex_cr_req_no_bootstrap_key(dev, apdev):
    """DPP protocol testing - no Bootstrap Key in PKEX Commit-Reveal Request"""
    run_dpp_proto_pkex_req_missing(dev, 38,
                                   "No valid peer bootstrapping key found")

def test_dpp_proto_pkex_cr_req_no_i_auth_tag(dev, apdev):
    """DPP protocol testing - no I-Auth Tag in PKEX Commit-Reveal Request"""
    run_dpp_proto_pkex_req_missing(dev, 39, "No valid u (I-Auth tag) found")

def test_dpp_proto_pkex_cr_req_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in PKEX Commit-Reveal Request"""
    run_dpp_proto_pkex_req_missing(dev, 40, "Missing or invalid required Wrapped Data attribute")

def test_dpp_proto_pkex_cr_resp_no_bootstrap_key(dev, apdev):
    """DPP protocol testing - no Bootstrap Key in PKEX Commit-Reveal Response"""
    run_dpp_proto_pkex_resp_missing(dev, 41,
                                    "No valid peer bootstrapping key found")

def test_dpp_proto_pkex_cr_resp_no_r_auth_tag(dev, apdev):
    """DPP protocol testing - no R-Auth Tag in PKEX Commit-Reveal Response"""
    run_dpp_proto_pkex_resp_missing(dev, 42, "No valid v (R-Auth tag) found")

def test_dpp_proto_pkex_cr_resp_no_wrapped_data(dev, apdev):
    """DPP protocol testing - no Wrapped Data in PKEX Commit-Reveal Response"""
    run_dpp_proto_pkex_resp_missing(dev, 43, "Missing or invalid required Wrapped Data attribute")
# Each wrapper below runs PKEX with one dpp_test value that corrupts a
# single attribute value, and checks for the expected failure reason.
def test_dpp_proto_pkex_exchange_req_invalid_encrypted_key(dev, apdev):
    """DPP protocol testing - invalid Encrypted Key in PKEX Exchange Request"""
    run_dpp_proto_pkex_req_missing(dev, 44,
                                   "Invalid Encrypted Key value")

def test_dpp_proto_pkex_exchange_resp_invalid_encrypted_key(dev, apdev):
    """DPP protocol testing - invalid Encrypted Key in PKEX Exchange Response"""
    run_dpp_proto_pkex_resp_missing(dev, 45,
                                    "Invalid Encrypted Key value")

def test_dpp_proto_pkex_exchange_resp_invalid_status(dev, apdev):
    """DPP protocol testing - invalid Status in PKEX Exchange Response"""
    run_dpp_proto_pkex_resp_missing(dev, 46,
                                    "PKEX failed (peer indicated failure)")

def test_dpp_proto_pkex_cr_req_invalid_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid Bootstrap Key in PKEX Commit-Reveal Request"""
    run_dpp_proto_pkex_req_missing(dev, 47,
                                   "Peer bootstrapping key is invalid")

def test_dpp_proto_pkex_cr_resp_invalid_bootstrap_key(dev, apdev):
    """DPP protocol testing - invalid Bootstrap Key in PKEX Commit-Reveal Response"""
    run_dpp_proto_pkex_resp_missing(dev, 48,
                                    "Peer bootstrapping key is invalid")

def test_dpp_proto_pkex_cr_req_i_auth_tag_mismatch(dev, apdev):
    """DPP protocol testing - I-auth tag mismatch in PKEX Commit-Reveal Request"""
    run_dpp_proto_pkex_req_missing(dev, 49, "No valid u (I-Auth tag) found")

def test_dpp_proto_pkex_cr_resp_r_auth_tag_mismatch(dev, apdev):
    """DPP protocol testing - R-auth tag mismatch in PKEX Commit-Reveal Response"""
    run_dpp_proto_pkex_resp_missing(dev, 50, "No valid v (R-Auth tag) found")
def test_dpp_proto_stop_at_pkex_exchange_resp(dev, apdev):
    """DPP protocol testing - stop when receiving PKEX Exchange Response"""
    run_dpp_proto_init_pkex(dev, 1, 84)
    if dev[1].wait_event(["DPP-TX "], timeout=5) is None:
        raise Exception("PKEX Exchange Req TX not seen")
    if dev[0].wait_event(["DPP-TX "], timeout=5) is None:
        raise Exception("PKEX Exchange Resp not seen")
    # The initiator drops the response, so it must not transmit a CR Req.
    if dev[1].wait_event(["DPP-TX "], timeout=0.1) is not None:
        raise Exception("Unexpected PKEX CR Req TX")
def test_dpp_proto_stop_at_pkex_cr_req(dev, apdev):
    """DPP protocol testing - stop when receiving PKEX CR Request"""
    run_dpp_proto_init_pkex(dev, 0, 85)
    ev = dev[1].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX Exchange Req TX not seen")
    ev = dev[0].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX Exchange Resp not seen")
    ev = dev[1].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX CR Req TX not seen")
    # Responder stops at the CR Request, so no CR Response may be sent.
    ev = dev[0].wait_event(["DPP-TX "], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected PKEX CR Resp TX")
def test_dpp_proto_stop_at_pkex_cr_resp(dev, apdev):
    """DPP protocol testing - stop when receiving PKEX CR Response"""
    run_dpp_proto_init_pkex(dev, 1, 86)
    ev = dev[1].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX Exchange Req TX not seen")
    ev = dev[0].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX Exchange Resp not seen")
    ev = dev[1].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX CR Req TX not seen")
    ev = dev[0].wait_event(["DPP-TX "], timeout=5)
    if ev is None:
        raise Exception("PKEX CR Resp TX not seen")
    # Initiator stops at the CR Response, so DPP authentication must not
    # be started (no Auth Req frame).
    ev = dev[1].wait_event(["DPP-TX "], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected Auth Req TX")
def test_dpp_proto_network_introduction(dev, apdev):
    """DPP protocol testing - network introduction"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Pre-provisioned Configurator signing key plus matching Connector /
    # netAccessKey blobs for the AP and the STA (hex DER and signed JWS
    # strings; must stay byte-exact to verify against each other).
    csign = "3059301306072a8648ce3d020106082a8648ce3d03010703420004d02e5bd81a120762b5f0f2994777f5d40297238a6c294fd575cdf35fabec44c050a6421c401d98d659fd2ed13c961cc8287944dd3202f516977800d3ab2f39ee"
    ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJzOEFrYjg5bTV4UGhoYk5UbTVmVVo0eVBzNU5VMkdxYXNRY3hXUWhtQVFRIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIwOHF4TlNYRzRWemdCV3BjVUdNSmc1czNvbElOVFJsRVQ1aERpNkRKY3ZjIiwieSI6IlVhaGFYQXpKRVpRQk1YaHRUQnlZZVlrOWtJYjk5UDA3UV9NcW9TVVZTVEkifX0.a5_nfMVr7Qe1SW0ZL3u6oQRm5NUCYUSfixDAJOUFN3XUfECBZ6E8fm8xjeSfdOytgRidTz0CTlIRjzPQo82dmQ"
    ap_netaccesskey = "30770201010420f6531d17f29dfab655b7c9e923478d5a345164c489aadd44a3519c3e9dcc792da00a06082a8648ce3d030107a14403420004d3cab13525c6e15ce0056a5c506309839b37a2520d4d19444f98438ba0c972f751a85a5c0cc911940131786d4c1c9879893d9086fdf4fd3b43f32aa125154932"
    sta_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJzOEFrYjg5bTV4UGhoYk5UbTVmVVo0eVBzNU5VMkdxYXNRY3hXUWhtQVFRIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6InN0YSJ9XSwibmV0QWNjZXNzS2V5Ijp7Imt0eSI6IkVDIiwiY3J2IjoiUC0yNTYiLCJ4IjoiZWMzR3NqQ3lQMzVBUUZOQUJJdEltQnN4WXVyMGJZX1dES1lfSE9zUGdjNCIsInkiOiJTRS1HVllkdWVnTFhLMU1TQXZNMEx2QWdLREpTNWoyQVhCbE9PMTdUSTRBIn19.PDK9zsGlK-e1pEOmNxVeJfCS8pNeay6ckIS1TXCQsR64AR-9wFPCNVjqOxWvVKltehyMFqVAtOcv0IrjtMJFqQ"
    sta_netaccesskey = "30770201010420bc33380c26fd2168b69cd8242ed1df07ba89aa4813f8d4e8523de6ca3f8dd28ba00a06082a8648ce3d030107a1440342000479cdc6b230b23f7e40405340048b48981b3162eaf46d8fd60ca63f1ceb0f81ce484f8655876e7a02d72b531202f3342ef020283252e63d805c194e3b5ed32380"
    params = { "ssid": "dpp",
               "wpa": "2",
               "wpa_key_mgmt": "DPP",
               "ieee80211w": "2",
               "rsn_pairwise": "CCMP",
               "dpp_connector": ap_connector,
               "dpp_csign": csign,
               "dpp_netaccesskey": ap_netaccesskey }
    try:
        hapd = hostapd.add_ap(apdev[0], params)
    except:
        raise HwsimSkip("DPP not supported")

    # STA-side protocol testing: each dpp_test value breaks the STA's
    # Peer Discovery Request in a different way; the AP still receives
    # the frame (type=5), and for test 80 the STA reports status=7.
    for test in [ 60, 61, 80, 82 ]:
        dev[0].set("dpp_test", str(test))
        dev[0].connect("dpp", key_mgmt="DPP", scan_freq="2412", ieee80211w="2",
                       dpp_csign=csign, dpp_connector=sta_connector,
                       dpp_netaccesskey=sta_netaccesskey, wait_connect=False)

        ev = dev[0].wait_event(["DPP-TX"], timeout=10)
        if ev is None or "type=5" not in ev:
            raise Exception("Peer Discovery Request TX not reported")
        ev = dev[0].wait_event(["DPP-TX-STATUS"], timeout=2)
        if ev is None or "result=SUCCESS" not in ev:
            raise Exception("Peer Discovery Request TX status not reported")

        ev = hapd.wait_event(["DPP-RX"], timeout=10)
        if ev is None or "type=5" not in ev:
            raise Exception("Peer Discovery Request RX not reported")

        if test == 80:
            ev = dev[0].wait_event(["DPP-INTRO"], timeout=10)
            if ev is None:
                raise Exception("DPP-INTRO not reported for test 80")
            if "status=7" not in ev:
                raise Exception("Unexpected result in test 80: " + ev)

        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
        hapd.dump_monitor()
    dev[0].set("dpp_test", "0")

    # AP-side protocol testing: each dpp_test value breaks the AP's Peer
    # Discovery Response; the STA must report the matching DPP-INTRO
    # failure (or, for unlisted values, no status at all).
    for test in [ 62, 63, 64, 77, 78, 79 ]:
        hapd.set("dpp_test", str(test))
        dev[0].connect("dpp", key_mgmt="DPP", scan_freq="2412", ieee80211w="2",
                       dpp_csign=csign, dpp_connector=sta_connector,
                       dpp_netaccesskey=sta_netaccesskey, wait_connect=False)

        ev = dev[0].wait_event(["DPP-INTRO"], timeout=10)
        if ev is None:
            raise Exception("Peer introduction result not reported (test %d)" % test)
        if test == 77:
            if "fail=transaction_id_mismatch" not in ev:
                raise Exception("Connector validation failure not reported")
        elif test == 78:
            if "status=254" not in ev:
                raise Exception("Invalid status value not reported")
        elif test == 79:
            if "fail=peer_connector_validation_failed" not in ev:
                raise Exception("Connector validation failure not reported")
        elif "status=" in ev:
            raise Exception("Unexpected peer introduction result (test %d): " % test + ev)

        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
        hapd.dump_monitor()
    hapd.set("dpp_test", "0")

    # Sanity check: with no protocol testing enabled the STA connects.
    dev[0].connect("dpp", key_mgmt="DPP", scan_freq="2412", ieee80211w="2",
                   dpp_csign=csign, dpp_connector=sta_connector,
                   dpp_netaccesskey=sta_netaccesskey)
def test_dpp_qr_code_no_chan_list_unicast(dev, apdev):
    """DPP QR Code and no channel list (unicast)"""
    run_dpp_qr_code_chan_list(dev, apdev, True, 2417, None)

def test_dpp_qr_code_chan_list_unicast(dev, apdev):
    """DPP QR Code and 2.4 GHz channels (unicast)"""
    run_dpp_qr_code_chan_list(dev, apdev, True, 2417,
                              "81/1,81/2,81/3,81/4,81/5,81/6,81/7,81/8,81/9,81/10,81/11,81/12,81/13")

def test_dpp_qr_code_chan_list_no_peer_unicast(dev, apdev):
    """DPP QR Code and channel list and no peer (unicast)"""
    # Responder listens on 2417 MHz, which is not among the advertised
    # channels (81/1,81/6,81/11), so initiation must fail after retries.
    run_dpp_qr_code_chan_list(dev, apdev, True, 2417, "81/1,81/6,81/11",
                              no_wait=True)
    ev = dev[1].wait_event(["DPP-AUTH-INIT-FAILED"], timeout=5)
    if ev is None:
        raise Exception("Initiation failure not reported")

def test_dpp_qr_code_no_chan_list_broadcast(dev, apdev):
    """DPP QR Code and no channel list (broadcast)"""
    run_dpp_qr_code_chan_list(dev, apdev, False, 2412, None)

def test_dpp_qr_code_chan_list_broadcast(dev, apdev):
    """DPP QR Code and some 2.4 GHz channels (broadcast)"""
    run_dpp_qr_code_chan_list(dev, apdev, False, 2412, "81/1,81/6,81/11",
                              timeout=10)
def run_dpp_qr_code_chan_list(dev, apdev, unicast, listen_freq, chanlist,
                              no_wait=False, timeout=5):
    """QR-code bootstrapped DPP exchange with an optional channel list.

    dev[0] is the responder showing the QR code and listening on
    listen_freq; dev[1] scans the code and initiates. chanlist (and the
    unicast MAC, when requested) are embedded into the bootstrapping URI.
    With no_wait the function returns right after DPP_AUTH_INIT.
    """
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Short retry settings so the no-peer case fails quickly.
    dev[1].set("dpp_init_max_tries", "3")
    dev[1].set("dpp_init_retry_time", "100")
    dev[1].set("dpp_resp_wait_time", "1000")

    logger.info("dev0 displays QR Code")
    cmd = "DPP_BOOTSTRAP_GEN type=qrcode"
    if chanlist:
        cmd += " chan=" + chanlist
    if unicast:
        # MAC in the URI is colon-free hex.
        addr = dev[0].own_addr().replace(':', '')
        cmd += " mac=" + addr
    res = dev[0].request(cmd)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)

    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)

    logger.info("dev1 initiates DPP Authentication")
    cmd = "DPP_LISTEN %d" % listen_freq
    if "OK" not in dev[0].request(cmd):
        raise Exception("Failed to start listen operation")
    cmd = "DPP_AUTH_INIT peer=%d" % id1
    if "OK" not in dev[1].request(cmd):
        raise Exception("Failed to initiate DPP Authentication")
    if no_wait:
        return

    ev = dev[0].wait_event(["DPP-AUTH-SUCCESS"], timeout=timeout)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Responder)")
    ev = dev[1].wait_event(["DPP-AUTH-SUCCESS"], timeout=5)
    if ev is None:
        raise Exception("DPP authentication did not succeed (Initiator)")
    ev = dev[0].wait_event(["DPP-CONF-RECEIVED", "DPP-CONF-FAILED"], timeout=5)
    if ev is None:
        raise Exception("DPP configuration not completed (Enrollee)")
    dev[0].request("DPP_STOP_LISTEN")
    dev[0].dump_monitor()
    dev[1].dump_monitor()
def test_dpp_qr_code_chan_list_no_match(dev, apdev):
    """DPP QR Code and no matching supported channel"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Advertise only an unsupported operating class/channel in the URI.
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=123/123")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    # With no usable channel the initiator must refuse to start.
    if "FAIL" not in dev[1].request("DPP_AUTH_INIT peer=%d" % id1):
        raise Exception("DPP Authentication started unexpectedly")
def _run_dpp_pkex_alloc_fail_cases(dev, id0, id1, conf_id, tests, fail_dev):
    """Run PKEX negotiations with alloc_fail() armed on dev[fail_dev].

    For each (count, func) pair: the responder (dev[0]) is re-armed with
    PKEX data and a listen operation, the initiator (dev[1]) starts the
    exchange, and the triggered allocation failure is waited for. Any
    GAS query that managed to start is terminated before the next round.
    """
    for count, func in tests:
        dev[0].request("DPP_STOP_LISTEN")
        dev[1].request("DPP_STOP_LISTEN")
        dev[0].dump_monitor()
        dev[1].dump_monitor()
        res = dev[0].request("DPP_PKEX_ADD own=%d identifier=test code=secret" % id0)
        if "FAIL" in res:
            raise Exception("Failed to set PKEX data (responder)")
        if "OK" not in dev[0].request("DPP_LISTEN 2437"):
            raise Exception("Failed to start listen operation")
        with alloc_fail(dev[fail_dev], count, func):
            cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 conf=sta-dpp configurator=%d code=secret" % (id1, conf_id)
            dev[1].request(cmd)
            wait_fail_trigger(dev[fail_dev], "GET_ALLOC_FAIL", max_iter=100)
            ev = dev[0].wait_event(["GAS-QUERY-START"], timeout=0.01)
            if ev:
                dev[0].request("DPP_STOP_LISTEN")
                dev[0].wait_event(["GAS-QUERY-DONE"], timeout=3)

def test_dpp_pkex_alloc_fail(dev, apdev):
    """DPP/PKEX and memory allocation failures"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])

    # Configurator key generation failures
    tests = [(1, "=dpp_keygen_configurator"),
             (1, "base64_gen_encode;dpp_keygen_configurator")]
    for count, func in tests:
        with alloc_fail(dev[1], count, func):
            if "FAIL" not in dev[1].request("DPP_CONFIGURATOR_ADD"):
                raise Exception("Unexpected DPP_CONFIGURATOR_ADD success")

    res = dev[1].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)

    res = dev[0].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)

    # Local error cases on the Initiator
    tests = [(1, "dpp_get_pubkey_point"),
             (1, "dpp_alloc_msg;dpp_pkex_build_exchange_req"),
             (1, "dpp_alloc_msg;dpp_pkex_build_commit_reveal_req"),
             (1, "dpp_alloc_msg;dpp_auth_build_req"),
             (1, "dpp_alloc_msg;dpp_auth_build_conf"),
             (1, "dpp_bootstrap_key_hash"),
             (1, "dpp_auth_init"),
             (1, "=dpp_auth_resp_rx"),
             (2, "=dpp_auth_resp_rx"),
             (1, "dpp_build_conf_start"),
             (1, "dpp_build_conf_obj_dpp"),
             (2, "dpp_build_conf_obj_dpp"),
             (3, "dpp_build_conf_obj_dpp"),
             (4, "dpp_build_conf_obj_dpp"),
             (5, "dpp_build_conf_obj_dpp"),
             (6, "dpp_build_conf_obj_dpp"),
             (7, "dpp_build_conf_obj_dpp"),
             (8, "dpp_build_conf_obj_dpp"),
             (1, "dpp_conf_req_rx"),
             (2, "dpp_conf_req_rx"),
             (3, "dpp_conf_req_rx"),
             (4, "dpp_conf_req_rx"),
             (5, "dpp_conf_req_rx"),
             (6, "dpp_conf_req_rx"),
             (7, "dpp_conf_req_rx"),
             (1, "dpp_pkex_init"),
             (2, "dpp_pkex_init"),
             (3, "dpp_pkex_init"),
             (1, "dpp_pkex_derive_z"),
             (1, "=dpp_pkex_rx_commit_reveal_resp"),
             (1, "dpp_get_pubkey_point;dpp_build_jwk"),
             (2, "dpp_get_pubkey_point;dpp_build_jwk"),
             (1, "dpp_get_pubkey_point;dpp_auth_init")]
    _run_dpp_pkex_alloc_fail_cases(dev, id0, id1, conf_id, tests, 1)

    # Local error cases on the Responder
    tests = [(1, "dpp_get_pubkey_point"),
             (1, "dpp_alloc_msg;dpp_pkex_build_exchange_resp"),
             (1, "dpp_alloc_msg;dpp_pkex_build_commit_reveal_resp"),
             (1, "dpp_alloc_msg;dpp_auth_build_resp"),
             (1, "dpp_get_pubkey_point;dpp_auth_build_resp_ok"),
             (1, "=dpp_auth_req_rx"),
             (2, "=dpp_auth_req_rx"),
             (1, "=dpp_auth_conf_rx"),
             (1, "json_parse;dpp_parse_jws_prot_hdr"),
             (1, "json_get_member_base64url;dpp_parse_jws_prot_hdr"),
             (1, "json_get_member_base64url;dpp_parse_jwk"),
             (2, "json_get_member_base64url;dpp_parse_jwk"),
             (1, "json_parse;dpp_parse_connector"),
             (1, "dpp_parse_jwk;dpp_parse_connector"),
             (1, "dpp_parse_jwk;dpp_parse_cred_dpp"),
             (1, "dpp_get_pubkey_point;dpp_check_pubkey_match"),
             (1, "base64_gen_decode;dpp_process_signed_connector"),
             (1, "dpp_parse_jws_prot_hdr;dpp_process_signed_connector"),
             (2, "base64_gen_decode;dpp_process_signed_connector"),
             (3, "base64_gen_decode;dpp_process_signed_connector"),
             (4, "base64_gen_decode;dpp_process_signed_connector"),
             (1, "json_parse;dpp_parse_conf_obj"),
             (1, "dpp_conf_resp_rx"),
             (1, "=dpp_pkex_derive_z"),
             (1, "=dpp_pkex_rx_exchange_req"),
             (2, "=dpp_pkex_rx_exchange_req"),
             (3, "=dpp_pkex_rx_exchange_req"),
             (1, "=dpp_pkex_rx_commit_reveal_req"),
             (1, "dpp_get_pubkey_point;dpp_pkex_rx_commit_reveal_req"),
             (1, "dpp_bootstrap_key_hash")]
    _run_dpp_pkex_alloc_fail_cases(dev, id0, id1, conf_id, tests, 0)
def _run_dpp_pkex_test_fail_cases(dev, id0, id1, conf_id, tests, fail_dev):
    """Run PKEX negotiations with fail_test() armed on dev[fail_dev].

    For each (count, func) pair: the responder (dev[0]) is re-armed with
    PKEX data and a listen operation, the initiator (dev[1]) starts the
    exchange, and the triggered local failure is waited for. Any GAS
    query that managed to start is terminated before the next round.
    """
    for count, func in tests:
        dev[0].request("DPP_STOP_LISTEN")
        dev[1].request("DPP_STOP_LISTEN")
        dev[0].dump_monitor()
        dev[1].dump_monitor()
        res = dev[0].request("DPP_PKEX_ADD own=%d identifier=test code=secret" % id0)
        if "FAIL" in res:
            raise Exception("Failed to set PKEX data (responder)")
        if "OK" not in dev[0].request("DPP_LISTEN 2437"):
            raise Exception("Failed to start listen operation")
        with fail_test(dev[fail_dev], count, func):
            cmd = "DPP_PKEX_ADD own=%d identifier=test init=1 conf=sta-dpp configurator=%d code=secret" % (id1, conf_id)
            dev[1].request(cmd)
            wait_fail_trigger(dev[fail_dev], "GET_FAIL", max_iter=100)
            ev = dev[0].wait_event(["GAS-QUERY-START"], timeout=0.01)
            if ev:
                dev[0].request("DPP_STOP_LISTEN")
                dev[0].wait_event(["GAS-QUERY-DONE"], timeout=3)

def test_dpp_pkex_test_fail(dev, apdev):
    """DPP/PKEX and local failures"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])

    # Configurator key generation failure
    for count, func in [(1, "dpp_keygen_configurator")]:
        with fail_test(dev[1], count, func):
            if "FAIL" not in dev[1].request("DPP_CONFIGURATOR_ADD"):
                raise Exception("Unexpected DPP_CONFIGURATOR_ADD success")

    # Bootstrapping key generation failure
    for count, func in [(1, "dpp_keygen")]:
        with fail_test(dev[1], count, func):
            if "FAIL" not in dev[1].request("DPP_BOOTSTRAP_GEN type=pkex"):
                raise Exception("Unexpected DPP_BOOTSTRAP_GEN success")

    res = dev[1].request("DPP_CONFIGURATOR_ADD")
    if "FAIL" in res:
        raise Exception("Failed to add configurator")
    conf_id = int(res)

    res = dev[0].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=pkex")
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1 = int(res)

    # Local error cases on the Initiator
    tests = [(1, "aes_siv_encrypt;dpp_auth_build_req"),
             (1, "os_get_random;dpp_auth_init"),
             (1, "dpp_derive_k1;dpp_auth_init"),
             (1, "dpp_hkdf_expand;dpp_derive_k1;dpp_auth_init"),
             (1, "dpp_gen_i_auth;dpp_auth_build_conf"),
             (1, "aes_siv_encrypt;dpp_auth_build_conf"),
             (1, "dpp_derive_k2;dpp_auth_resp_rx"),
             (1, "dpp_hkdf_expand;dpp_derive_k2;dpp_auth_resp_rx"),
             (1, "dpp_derive_ke;dpp_auth_resp_rx"),
             (1, "dpp_hkdf_expand;dpp_derive_ke;dpp_auth_resp_rx"),
             (1, "dpp_gen_r_auth;dpp_auth_resp_rx"),
             (1, "aes_siv_encrypt;dpp_build_conf_resp"),
             (1, "dpp_pkex_derive_Qi;dpp_pkex_build_exchange_req"),
             (1, "aes_siv_encrypt;dpp_pkex_build_commit_reveal_req"),
             (1, "hmac_sha256_vector;dpp_pkex_rx_exchange_resp"),
             (1, "aes_siv_decrypt;dpp_pkex_rx_commit_reveal_resp"),
             (1, "hmac_sha256_vector;dpp_pkex_rx_commit_reveal_resp")]
    _run_dpp_pkex_test_fail_cases(dev, id0, id1, conf_id, tests, 1)

    # Local error cases on the Responder
    tests = [(1, "aes_siv_encrypt;dpp_auth_build_resp"),
             (1, "os_get_random;dpp_build_conf_req"),
             (1, "aes_siv_encrypt;dpp_build_conf_req"),
             (1, "os_get_random;dpp_auth_build_resp_ok"),
             (1, "dpp_derive_k2;dpp_auth_build_resp_ok"),
             (1, "dpp_derive_ke;dpp_auth_build_resp_ok"),
             (1, "dpp_gen_r_auth;dpp_auth_build_resp_ok"),
             (1, "aes_siv_encrypt;dpp_auth_build_resp_ok"),
             (1, "dpp_derive_k1;dpp_auth_req_rx"),
             (1, "aes_siv_decrypt;dpp_auth_req_rx"),
             (1, "aes_siv_decrypt;dpp_auth_conf_rx"),
             (1, "dpp_gen_i_auth;dpp_auth_conf_rx"),
             (1, "dpp_check_pubkey_match"),
             (1, "aes_siv_decrypt;dpp_conf_resp_rx"),
             (1, "hmac_sha256_kdf;dpp_pkex_derive_z"),
             (1, "dpp_pkex_derive_Qi;dpp_pkex_rx_exchange_req"),
             (1, "dpp_pkex_derive_Qr;dpp_pkex_rx_exchange_req"),
             (1, "aes_siv_encrypt;dpp_pkex_build_commit_reveal_resp"),
             (1, "aes_siv_decrypt;dpp_pkex_rx_commit_reveal_req"),
             (1, "hmac_sha256_vector;dpp_pkex_rx_commit_reveal_req"),
             (2, "hmac_sha256_vector;dpp_pkex_rx_commit_reveal_req")]
    _run_dpp_pkex_test_fail_cases(dev, id0, id1, conf_id, tests, 0)
| 42.68028
| 1,038
| 0.663018
| 25,248
| 182,885
| 4.616247
| 0.029824
| 0.07015
| 0.04513
| 0.049644
| 0.871919
| 0.852082
| 0.832116
| 0.797591
| 0.765639
| 0.730487
| 0
| 0.057106
| 0.216092
| 182,885
| 4,284
| 1,039
| 42.690243
| 0.755861
| 0.063034
| 0
| 0.705124
| 0
| 0.0041
| 0.357217
| 0.100859
| 0.000293
| 0
| 0.00007
| 0
| 0
| 1
| 0.067936
| false
| 0.002343
| 0.00205
| 0
| 0.071157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
86b8c65054d6a037f9f5a9700fbcb989282edc4e
| 3,234
|
py
|
Python
|
tests/explanations_mining/test_descriptions.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | 5
|
2020-11-17T19:59:49.000Z
|
2021-09-23T23:10:39.000Z
|
tests/explanations_mining/test_descriptions.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
tests/explanations_mining/test_descriptions.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
from explanations_mining.descriptions_new import Description2, Atom
def test_equal_descriptions():
    """Description2 equality per these expectations ignores body-atom
    order, local variable naming and the head's label value."""
    base = Description2(body=[Atom('?x1', 'created', '?x'),
                              Atom('?x', 'rdf:type', 'wordnet_book_106410904')],
                        head=Atom('?x', 'label', 'book'))
    identical = Description2(body=[Atom('?x1', 'created', '?x'),
                                   Atom('?x', 'rdf:type', 'wordnet_book_106410904')],
                             head=Atom('?x', 'label', 'book'))
    assert base == base
    assert base == identical
    reordered = Description2(body=[Atom('?x', 'rdf:type', 'wordnet_book_106410904'),
                                   Atom('?x1', 'created', '?x')],
                             head=Atom('?x', 'label', 'song'))
    assert base == reordered
    renamed_var = Description2(body=[Atom('?x', 'rdf:type', 'wordnet_book_106410904'),
                                     Atom('?y', 'created', '?x')],
                               head=Atom('?x', 'label', 'song'))
    assert base == renamed_var
def test_not_equal_descriptions():
    """Descriptions differing in body size, bound constants vs variables,
    or in their constant values must not compare equal."""
    base = Description2(body=[Atom('?x1', 'created', '?x'),
                              Atom('?x', 'rdf:type', 'wordnet_book_106410904')],
                        head=Atom('?x', 'label', 'book'))
    shorter = Description2(body=[Atom('?x1', 'created', '?x')],
                           head=Atom('?x', 'label', 'book'))
    assert base == base
    assert base != shorter
    var_object = Description2(body=[Atom('?x', 'rdf:type', '?x4'),
                                    Atom('?x1', 'created', '?x')],
                              head=Atom('?x', 'label', 'song'))
    assert base != var_object
    var_renamed = Description2(body=[Atom('?x', 'rdf:type', '?z'),
                                     Atom('?x7', 'created', '?x')],
                               head=Atom('?x', 'label', 'song'))
    assert var_object == var_renamed
    with_ahmed = Description2(body=[Atom('?x', 'rdf:type', '?z'),
                                    Atom('ahmed', 'created', '?x'),
                                    Atom('mahmoud', 'created', '?x')],
                              head=Atom('?x', 'label', 'song'))
    with_tuka = Description2(
        body=[Atom('tuka', 'created', '?x'),
              Atom('?x', 'rdf:type', '?z'),
              Atom('mahmoud', 'created', '?x')],
        head=Atom('?x', 'label', 'song'))
    assert with_ahmed != with_tuka
def test_descriptions_set():
    """Hashing must agree with equality: adding an equivalent description
    to a set must not grow it, adding a distinct one must."""
    seen = set()
    first = Description2(body=[Atom('?x1', 'created', '?x'),
                               Atom('?x', 'rdf:type', 'wordnet_book_106410904')],
                         head=Atom('?x', 'label', 'book'))
    seen.add(first)
    second = Description2(body=[Atom('?x1', 'created', '?x')],
                          head=Atom('?x', 'label', 'book'))
    seen.add(second)
    assert len(seen) == 2
    # Same body as `first`, only reordered — a duplicate.
    duplicate = Description2(body=[Atom('?x', 'rdf:type', 'wordnet_book_106410904'),
                                   Atom('?x1', 'created', '?x')],
                             head=Atom('?x', 'label', 'song'))
    seen.add(duplicate)
    assert len(seen) == 2
    with_ahmed = Description2(
        body=[Atom('?x', 'rdf:type', '?z'),
              Atom('ahmed', 'created', '?x'),
              Atom('mahmoud', 'created', '?x')],
        head=Atom('?x', 'label', 'song'))
    seen.add(with_ahmed)
    assert len(seen) == 3
    with_tuka = Description2(
        body=[Atom('tuka', 'created', '?x'),
              Atom('?x', 'rdf:type', '?z'),
              Atom('mahmoud', 'created', '?x')],
        head=Atom('?x', 'label', 'song'))
    seen.add(with_tuka)
    assert len(seen) == 4
    two_vars = Description2(body=[Atom('?x', 'rdf:type', '?z'),
                                  Atom('?x7', 'created', '?x')],
                            head=Atom('?x', 'label', 'song'))
    seen.add(two_vars)
    assert len(seen) == 5
    # Equivalent to `two_vars` up to variable renaming — a duplicate.
    two_vars_renamed = Description2(body=[Atom('?x', 'rdf:type', '?x4'),
                                          Atom('?x1', 'created', '?x')],
                                    head=Atom('?x', 'label', 'song'))
    seen.add(two_vars_renamed)
    assert len(seen) == 5
| 39.925926
| 134
| 0.505875
| 404
| 3,234
| 3.992574
| 0.116337
| 0.099194
| 0.210787
| 0.147551
| 0.874768
| 0.874768
| 0.874768
| 0.868568
| 0.861128
| 0.83695
| 0
| 0.056814
| 0.221707
| 3,234
| 80
| 135
| 40.425
| 0.584029
| 0
| 0
| 0.532258
| 0
| 0
| 0.24203
| 0.047663
| 0
| 0
| 0
| 0
| 0.241935
| 1
| 0.048387
| false
| 0
| 0.016129
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86bb0dc45f90e41b4f4315433bf28a093637c1ee
| 8,655
|
py
|
Python
|
tests/test_event_triggers.py
|
hongduc1402/RESEARCH-
|
22d59bdf983896fc77444ea7d90d6510fd4ff3f4
|
[
"MIT"
] | 7
|
2020-12-15T09:31:20.000Z
|
2022-02-07T05:56:00.000Z
|
tests/test_event_triggers.py
|
hongduc1402/RESEARCH-
|
22d59bdf983896fc77444ea7d90d6510fd4ff3f4
|
[
"MIT"
] | 1
|
2021-06-02T04:27:20.000Z
|
2021-06-02T04:27:20.000Z
|
tests/test_event_triggers.py
|
hongduc1402/RESEARCH-
|
22d59bdf983896fc77444ea7d90d6510fd4ff3f4
|
[
"MIT"
] | 3
|
2021-09-12T18:53:18.000Z
|
2022-02-13T23:49:44.000Z
|
import unittest
from cam_config import mute_possible_events
xml_event_triggers_text = '''<?xml version="1.0" encoding="UTF-8"?>
<EventNotification version="2.0">
<EventTriggerList version="2.0">
<EventTrigger>
<id>VMD-1</id>
<eventType>VMD</eventType>
<eventDescription>VMD Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
<EventTriggerNotification>
<id>record-1</id>
<notificationMethod>record</notificationMethod>
<videoInputID>1</videoInputID>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>tamper-1</id>
<eventType>tamperdetection</eventType>
<eventDescription>shelteralarm Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>diskfull</id>
<eventType>diskfull</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
<EventTriggerNotification>
<id>email</id>
<notificationMethod>email</notificationMethod>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>diskerror</id>
<eventType>diskerror</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
<EventTriggerNotification>
<id>email</id>
<notificationMethod>email</notificationMethod>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>nicbroken</id>
<eventType>nicbroken</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>ipconflict</id>
<eventType>ipconflict</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>illaccess</id>
<eventType>illaccess</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>linedetection-1</id>
<eventType>linedetection</eventType>
<eventDescription>Linedetection Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>fielddetection-1</id>
<eventType>fielddetection</eventType>
<eventDescription>fielddetection Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>videomismatch</id>
<eventType>videomismatch</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
<EventTriggerNotification>
<id>beep</id>
<notificationMethod>beep</notificationMethod>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>badvideo</id>
<eventType>badvideo</eventType>
<eventDescription>exception Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>PIR</id>
<eventType>PIR</eventType>
<eventDescription>PIR Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
<EventTriggerNotification>
<id>record-1</id>
<notificationMethod>record</notificationMethod>
<videoInputID>1</videoInputID>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
<EventTriggerNotification>
<id>beep</id>
<notificationMethod>beep</notificationMethod>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
<EventTriggerNotification>
<id>center</id>
<notificationMethod>center</notificationMethod>
<notificationRecurrence>beginning</notificationRecurrence>
</EventTriggerNotification>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>facedetection-1</id>
<eventType>facedetection</eventType>
<eventDescription>facedetection Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
<EventTrigger>
<id>storageDetection-1</id>
<eventType>storageDetection</eventType>
<eventDescription>storageDetection Event trigger Information</eventDescription>
<videoInputChannelID>1</videoInputChannelID>
<dynVideoInputChannelID>1</dynVideoInputChannelID>
<EventTriggerNotificationList>
</EventTriggerNotificationList>
</EventTrigger>
</EventTriggerList>
</EventNotification>
'''
class TestEventTriggers(unittest.TestCase):
    """Verify that every event trigger in the sample XML document is muted."""

    def test_mute_all_possible_events(self):
        # Event-trigger ids expected back from the parser, in document order.
        expected = [
            'VMD-1',
            'tamper-1',
            'diskfull',
            'diskerror',
            'nicbroken',
            'ipconflict',
            'illaccess',
            'linedetection-1',
            'fielddetection-1',
            'videomismatch',
            'badvideo',
            'PIR',
            'facedetection-1',
            'storageDetection-1'
        ]
        muted = mute_possible_events(xml_event_triggers_text)
        self.assertEqual(expected, muted)
# Allow running this test module directly (`python <module>.py`) in addition
# to discovery by a test runner.
if __name__ == '__main__':
    unittest.main()
| 43.059701
| 91
| 0.61814
| 466
| 8,655
| 11.427039
| 0.145923
| 0.036808
| 0.120939
| 0.123568
| 0.78216
| 0.78216
| 0.777277
| 0.763005
| 0.752864
| 0.714742
| 0
| 0.008351
| 0.294396
| 8,655
| 200
| 92
| 43.275
| 0.863599
| 0
| 0
| 0.65285
| 0
| 0
| 0.930445
| 0.546043
| 0
| 0
| 0
| 0
| 0.005181
| 1
| 0.005181
| false
| 0
| 0.010363
| 0
| 0.020725
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
86cce2125189598dc6f187d40c0d63f0514cf43b
| 35,381
|
py
|
Python
|
tests/xcactivity-parser-test.py
|
DyCI/xcactivity-parser
|
20745f95b67be1b5cde25ef961908453508f9416
|
[
"MIT"
] | 8
|
2016-05-17T20:11:05.000Z
|
2019-05-03T22:12:42.000Z
|
tests/xcactivity-parser-test.py
|
DyCI/xcactivity-parser
|
20745f95b67be1b5cde25ef961908453508f9416
|
[
"MIT"
] | 2
|
2015-02-05T21:21:55.000Z
|
2015-03-03T17:38:01.000Z
|
tests/xcactivity-parser-test.py
|
DyCI/xcactivity-parser
|
20745f95b67be1b5cde25ef961908453508f9416
|
[
"MIT"
] | null | null | null |
import subprocess
import six
# Path to the script under test, relative to the repository root (tests are
# expected to be launched from there).
script_path = './src/xcactivity-parser.py'
# helpers
def string_from_std(std):
    """Normalize a subprocess stdout/stderr payload to text.

    On Python 2 the pipe already yields ``str``, so the value is returned
    unchanged; on Python 3 the pipe yields ``bytes``, which are decoded
    as UTF-8.

    Previously this used the third-party ``six`` package just for the
    version check; ``sys.version_info`` does the same with the stdlib only.
    """
    import sys  # local import keeps the helper self-contained
    if sys.version_info[0] == 2:
        return std
    return str(std, encoding='utf8')
def run_subprocess(params):
    """Run *params* as a child process; return its (stdout, stderr) as text."""
    proc = subprocess.Popen(params,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    return string_from_std(stdout_data), string_from_std(stderr_data)
# tests
def test_script_run():
    """Launching with no arguments must produce some stderr output."""
    _out, err = run_subprocess([script_path])
    assert len(err) > 0
def test_usage_message_on_no_params():
    """No parameters at all -> usage help on stderr."""
    _out, errtext = run_subprocess([script_path])
    assert "Usage:" in errtext
def test_usage_message_on_only_file_parameter():
    """Only -x given -> still not enough; usage help is printed."""
    _out, errtext = run_subprocess([script_path, "-x", "/"])
    assert "Usage:" in errtext
def test_usage_message_on_only_directory_parameter():
    """Only -f given -> still not enough; usage help is printed."""
    _out, errtext = run_subprocess([script_path, "-f", "/"])
    assert "Usage:" in errtext
def test_usage_message_on_only_arch_parameter():
    """Only -a given -> still not enough; usage help is printed."""
    _out, errtext = run_subprocess([script_path, "-a", "i386"])
    assert "Usage:" in errtext
def test_searching_message_on_two_parameters():
    """-f and -x without -a still trigger the usage message."""
    _out, errtext = run_subprocess([script_path, "-f", "/", "-x", "/"])
    assert "Usage:" in errtext
def test_searching_message_on_all_parameters():
    """With -f, -x and -a all present, no usage message appears."""
    _out, errtext = run_subprocess([script_path, "-f", "/", "-x", "/", "-a", "i386"])
    assert "Usage:" not in errtext
def test_full_path_for_file():
    """A relative -f value is rejected with a 'full path' hint on stderr."""
    _out, errtext = run_subprocess([script_path, "-f", "asd", "-x", "/"])
    assert "full path" in errtext
def test_directory_existance():
    """A non-existent -x directory is reported on stderr."""
    _out, errtext = run_subprocess([script_path, "-f", "/", "-x", "asfsd"])
    assert "directory" in errtext
    assert "exist" in errtext
# Working with real files :)
# Fixture directories containing captured .xcactivitylog files, relative to
# the repository root, used by the end-to-end tests below.
fixtures_path = './tests/fixtures/one-file'
fixtures_file_with_spaces_path = './tests/fixtures/file-with-spaces'
fixtures_with_multiple_files_path = './tests/fixtures/multiple-files'
def test_arguments_on_existed_file():
    # End-to-end: against the one-file fixture log, the parser must print the
    # exact clang invocation recorded for this source file (arch i386).
    existingfile = "/Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch i386 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=gnu99 -fobjc-arc -fmodules -fmodules-cache-path=/Users/ptaykalo/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.1.sdk -fexceptions -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -g -Wno-sign-conversion -fobjc-abi-version=2 -fobjc-legacy-dispatch -mios-simulator-version-min=8.1 -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources/i386 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.dia -c /Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.o"
    out, err = run_subprocess([script_path, "-f", existingfile, "-x", fixtures_path, "-a", "i386"])
    assert not "Usage:" in err
    # Output must match the recorded command byte-for-byte (modulo trailing
    # whitespace/newline stripped here).
    assert compilationString == out.strip()
def test_arguments_on_existed_file_with_different_architecture():
    # Same fixture as above, but requesting arch x86_64: the parser must pick
    # the x86_64 variant of the recorded clang invocation.
    existingfile = "/Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=gnu99 -fobjc-arc -fmodules -fmodules-cache-path=/Users/ptaykalo/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.1.sdk -fexceptions -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -g -Wno-sign-conversion -fobjc-abi-version=2 -fobjc-legacy-dispatch -mios-simulator-version-min=8.1 -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.dia -c /Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.o"
    out, err = run_subprocess([script_path, "-f", existingfile, "-x", fixtures_path, "-a", "x86_64"])
    assert not "Usage:" in err
    assert compilationString == out.strip()
def test_arguments_on_existed_file_with_non_existed_architecture():
    # Negative check: when x86_64 is requested, the output must NOT equal the
    # i386 command recorded below (note the final `!=` assertion).
    existingfile = "/Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch i386 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=gnu99 -fobjc-arc -fmodules -fmodules-cache-path=/Users/ptaykalo/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.1.sdk -fexceptions -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -g -Wno-sign-conversion -fobjc-abi-version=2 -fobjc-legacy-dispatch -mios-simulator-version-min=8.1 -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources/i386 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.dia -c /Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/i386/SFInjectionsNotificationsCenter.o"
    out, err = run_subprocess([script_path, "-f", existingfile, "-x", fixtures_path, "-a", "x86_64"])
    assert not "Usage:" in err
    # The x86_64 result must differ from the i386 reference command.
    assert compilationString != out.strip()
def test_for_filesearch_with_spaces():
    # Source paths containing escaped spaces must still be matched inside
    # the fixture log.
    existingfile = "/Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=c99 -fobjc-arc -Wno-trigraphs -fpascal-strings -O0 -Wno-missing-field-initializers -Wno-missing-prototypes -Wno-implicit-atomic-properties -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wno-missing-braces -Wparentheses -Wswitch -Wno-unused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wno-empty-body -Wno-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wno-constant-conversion -Wno-int-conversion -Wno-bool-conversion -Wno-enum-conversion -Wno-shorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wno-undeclared-selector -Wno-deprecated-implementations -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -mmacosx-version-min=10.7 -g -Wno-sign-conversion -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.dia -c /Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.o"
    out, err = run_subprocess(
        [script_path, "-f", existingfile, "-x", fixtures_file_with_spaces_path, "-a", "x86_64"])
    assert not "Usage:" in err
    assert compilationString == out.strip()
def test_for_search_in_multiple_xcactivitylog():
    # When -x points at a directory holding two .xcactivitylog files, the
    # parser must locate each source file in whichever log mentions it.
    # First file in directory with two xcactivitylog files
    existingfile = "/Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=c99 -fobjc-arc -Wno-trigraphs -fpascal-strings -O0 -Wno-missing-field-initializers -Wno-missing-prototypes -Wno-implicit-atomic-properties -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wno-missing-braces -Wparentheses -Wswitch -Wno-unused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wno-empty-body -Wno-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wno-constant-conversion -Wno-int-conversion -Wno-bool-conversion -Wno-enum-conversion -Wno-shorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wno-undeclared-selector -Wno-deprecated-implementations -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -mmacosx-version-min=10.7 -g -Wno-sign-conversion -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.dia -c /Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.o"
    out, err = run_subprocess(
        [script_path, "-f", existingfile, "-x", fixtures_with_multiple_files_path, "-a", "x86_64"])
    assert not "Usage:" in err
    assert compilationString == out.strip()
    # Second file in directory with two xcactivitylog files
    existingfile = "/Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=gnu99 -fobjc-arc -fmodules -fmodules-cache-path=/Users/ptaykalo/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.1.sdk -fexceptions -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -g -Wno-sign-conversion -fobjc-abi-version=2 -fobjc-legacy-dispatch -mios-simulator-version-min=8.1 -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.dia -c /Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.o"
    out, err = run_subprocess([script_path, "-f", existingfile, "-x", fixtures_with_multiple_files_path, "-a", "x86_64"])
    assert not "Usage:" in err
    assert compilationString == out.strip()
def test_for_filesearch_with_working_dir():
    # With the extra -w flag the script prints two lines: the working
    # directory first, then the compilation command.
    existingfile = "/Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m"
    working_directory = "/Volumes/data/Projects/dyci-main/Support/Xcode/Source"
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=c99 -fobjc-arc -Wno-trigraphs -fpascal-strings -O0 -Wno-missing-field-initializers -Wno-missing-prototypes -Wno-implicit-atomic-properties -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wno-missing-braces -Wparentheses -Wswitch -Wno-unused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wno-empty-body -Wno-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wno-constant-conversion -Wno-int-conversion -Wno-bool-conversion -Wno-enum-conversion -Wno-shorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wno-undeclared-selector -Wno-deprecated-implementations -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -mmacosx-version-min=10.7 -g -Wno-sign-conversion -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/SFDYCIPlugin-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Products/Debug -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.dia -c /Volumes/data/Projects/dyci-main/Support/Xcode/Source/Classes/Directory\ With\ Spaces/SomeGeneralDyciFile.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/SFDYCIPlugin-fazyfnchxursjxbrqdmlnerzwutq/Build/Intermediates/SFDYCIPlugin.build/Debug/SFDYCIPlugin.build/Objects-normal/x86_64/SomeGeneralDyciFile.o"
    out, err = run_subprocess(
        [script_path, "-f", existingfile, "-x", fixtures_file_with_spaces_path, "-a", "x86_64", "-w"])
    assert not "Usage:" in err
    result = out.strip().splitlines()
    # Exactly two lines: working directory, then the command.
    assert len(result) == 2
    assert compilationString == result[1]
    assert working_directory == result[0]
def test_for_filesearch_with_working_dir_with_spaces():
    """The script must find the compilation line for a file whose working
    directory contains spaces, and print exactly two lines:
    line 0 = working directory, line 1 = full clang compilation command.
    """
    # Shell-escaped source path (note the backslash-escaped spaces in the literal).
    existingfile = "/Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m"
    # Expected working directory — unescaped form, as the script is expected to print it.
    working_directory = "/Volumes/data/Projects/dyci-main/Dynamic Code Injection/dyci-framework"
    # Expected clang invocation, copied verbatim from the fixture build log.
    compilationString = "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch x86_64 -fmessage-length=0 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -std=gnu99 -fobjc-arc -fmodules -fmodules-cache-path=/Users/ptaykalo/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-receiver-is-weak -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.1.sdk -fexceptions -fasm-blocks -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -g -Wno-sign-conversion -fobjc-abi-version=2 -fobjc-legacy-dispatch -mios-simulator-version-min=8.1 -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-generated-files.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-own-target-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-all-target-headers.hmap -iquote /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/dyci-project-headers.hmap -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator/include -I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources/x86_64 -I/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/DerivedSources -F/Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Products/Release-iphonesimulator -MMD -MT dependencies -MF /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.d --serialize-diagnostics /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.dia -c /Volumes/data/Projects/dyci-main/Dynamic\ Code\ Injection/dyci/Classes/Notifications/SFInjectionsNotificationsCenter.m -o /Users/ptaykalo/Library/Developer/Xcode/DerivedData/DYCI-hfgqlcmijbqsjtcnjyhbxgpcckwj/Build/Intermediates/dyci-framework.build/Release-iphonesimulator/dyci.build/Objects-normal/x86_64/SFInjectionsNotificationsCenter.o"
    # script_path / fixtures_path / run_subprocess are defined elsewhere in this
    # test module (not visible here); -a selects the arch, -w requests the
    # working directory on the first output line.
    out, err = run_subprocess([script_path, "-f", existingfile, "-x", fixtures_path, "-a", "x86_64", "-w"])
    # The script prints its usage text to stderr on bad arguments — must not happen here.
    assert not "Usage:" in err
    result = out.strip().splitlines()
    assert len(result) == 2
    assert compilationString == result[1]
    assert working_directory == result[0]
| 226.801282
| 3,957
| 0.83624
| 4,252
| 35,381
| 6.907103
| 0.066322
| 0.041166
| 0.063332
| 0.091832
| 0.979366
| 0.97654
| 0.975552
| 0.96595
| 0.961728
| 0.954408
| 0
| 0.011055
| 0.043781
| 35,381
| 155
| 3,958
| 228.264516
| 0.857029
| 0.005851
| 0
| 0.5
| 0
| 0.16
| 0.886004
| 0.735374
| 0
| 0
| 0
| 0
| 0.35
| 1
| 0.18
| false
| 0
| 0.02
| 0
| 0.23
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8130376ab9b578ce4cba5ad5ebc2704a781e11f4
| 129
|
py
|
Python
|
osbot_aws/lambdas/shell/shell_server.py
|
pbx-gs/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 2
|
2019-04-19T07:42:08.000Z
|
2019-06-23T11:46:18.000Z
|
osbot_aws/lambdas/shell/shell_server.py
|
pbx-gs/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 8
|
2020-02-16T23:43:07.000Z
|
2021-02-26T01:58:20.000Z
|
osbot_aws/lambdas/shell/shell_server.py
|
owasp-sbot/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 3
|
2020-02-16T15:45:58.000Z
|
2021-02-11T01:04:58.000Z
|
from osbot_aws.apis.shell.Shell_Server import Shell_Server
def run(event, context=None):
    """AWS Lambda entry point: forward the raw *event* to Shell_Server.

    The *context* argument is accepted for Lambda-signature compatibility
    but is not used.
    """
    server = Shell_Server()
    return server.invoke(event)
| 32.25
| 58
| 0.79845
| 20
| 129
| 4.95
| 0.7
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100775
| 129
| 4
| 59
| 32.25
| 0.853448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d4a8759d3e0477021491f95822ac8f7623e91603
| 8,567
|
py
|
Python
|
pyNastran/converters/nastran/gui/results.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 1
|
2021-08-02T09:49:24.000Z
|
2021-08-02T09:49:24.000Z
|
pyNastran/converters/nastran/gui/results.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 1
|
2021-06-07T16:33:59.000Z
|
2021-06-07T16:33:59.000Z
|
pyNastran/converters/nastran/gui/results.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 1
|
2021-10-14T03:52:44.000Z
|
2021-10-14T03:52:44.000Z
|
from copy import deepcopy
import numpy as np
from pyNastran.gui.gui_objects.table import Table
class LayeredTableResults(Table):
    """Centroidal GUI result with per-layer / per-method scalar data.

    ``scalars`` is indexed as ``scalars[itime, :, ilayer, imethod]`` and the
    ``name`` tuples handed to the accessors are
    ``(itime, ilayer, imethod, header)``.
    """
    def __init__(self, subcase_id, headers, eids, eid_max, scalars,
                 methods,
                 data_formats=None,
                 nlabels=None, labelsize=None, ncolors=None, colormap='jet',
                 set_max_min=False, uname='LayeredTableResults'):
        """this is a centroidal result

        Parameters
        ----------
        headers : List[str]
            the sidebar word
        titles : List[str]
            the legend title
        """
        location = 'centroid'
        titles = None
        Table.__init__(
            self, subcase_id, location, titles, headers, scalars,
            data_formats=data_formats, nlabels=nlabels,
            labelsize=labelsize, ncolors=ncolors,
            colormap=colormap, set_max_min=set_max_min,
            uname=uname)
        self.methods = methods
        # eids: element ids used to scatter sparse results into a dense array
        self.eids = eids
        self.eid_max = eid_max

    def finalize(self):
        """snapshot the current titles/headers as the defaults"""
        self.titles_default = deepcopy(self.titles)
        self.headers_default = deepcopy(self.headers)

    def get_methods(self, i):
        return self.methods

    def deflects(self, unused_i, unused_res_name):
        return False

    def get_default_title(self, i, name):
        """legend title"""
        (itime, ilayer, imethod, unused_header) = name
        return self.methods[imethod]

    def get_title(self, i, name):
        """legend title"""
        (itime, ilayer, imethod, unused_header) = name
        return self.methods[imethod]

    def get_header(self, i, name):
        """a header shows up in the text"""
        (itime, ilayer, imethod, header) = name
        return self.methods[imethod] + ': ' + header

    def get_data_format(self, i, name):
        return '%.3f'  # TODO: update

    def get_default_data_format(self, i, name):
        return '%.3f'  # TODO: update

    def get_scale(self, i, name):
        return None

    def get_default_scale(self, i, name):
        return None

    def get_scalar(self, i, name):
        return self.get_result(i, name)

    def get_magnitude(self, i, name):
        """magnitude of the (possibly complex) result"""
        scalar = self.get_scalar(i, name)  # TODO: update
        mag = scalar
        # BUG FIX: the original only checked 'complex64', so complex128
        # results fell through with a complex "magnitude"
        if scalar.dtype.name in ['complex64', 'complex128']:
            mag = np.sqrt(scalar.real ** 2 + scalar.imag ** 2)
        return mag

    def get_min_max(self, i, name):
        mag = self.get_magnitude(i, name)
        # guard against all-NaN data; np.nanmin/np.nanmax warn on it
        if np.any(np.isfinite(mag)):
            return np.nanmin(mag), np.nanmax(mag)
        return np.nan, np.nan

    def get_default_min_max(self, i, name):
        mag = self.get_magnitude(i, name)
        if np.any(np.isfinite(mag)):
            return np.nanmin(mag), np.nanmax(mag)
        return np.nan, np.nan

    def get_result(self, i, name):
        """return a dense (eid_max,) array for the requested time/layer/method"""
        (itime, ilayer, imethod, unused_header) = name
        scalars = self.scalars[itime, :, ilayer, imethod]
        if len(scalars) == self.eid_max:
            return scalars
        # sparse result: scatter into a NaN-filled dense array by element id
        data = np.full(self.eid_max, np.nan, dtype=scalars.dtype)
        #print(f'data.shape={data.shape} eids.shape={self.eids.shape} scalars.shape={scalars.shape}')
        #print(self.methods)
        data[self.eids] = scalars
        return data

    def __repr__(self):
        """defines str(self)"""
        msg = 'LayeredTableResults:\n'
        msg += f'    title={self.titles!r}\n'
        msg += f'    subcase_id={self.subcase_id}\n'
        msg += f'    data_type={self.data_type!r}\n'
        msg += f'    is_real={self.is_real} is_complex={self.is_complex}\n'
        msg += f'    location={self.location!r}\n'
        msg += f'    header={self.headers!r}\n'
        msg += f'    methods={self.methods}\n'
        msg += f'    data_format={self.data_formats!r}\n'
        msg += f'    uname={self.uname!r}\n'
        return msg
class SimpleTableResults(Table):
    """GUI result with per-method scalar data (no layer axis).

    ``scalars`` is indexed as ``scalars[itime, :, imethod]`` and the
    ``name`` tuples handed to the accessors are ``(itime, imethod, header)``.
    """
    def __init__(self, subcase_id, headers, eids, eid_max, scalars,
                 methods,
                 data_format=None,
                 nlabels=None, labelsize=None, ncolors=None, colormap='jet',
                 location='centroid',
                 set_max_min=False, uname='Geometry'):
        """this is a centroidal result

        Parameters
        ----------
        headers : List[str]
            the sidebar word
        titles : List[str]
            the legend title
        """
        titles = None
        assert data_format is not None, data_format
        ntimes = scalars.shape[0]
        # one shared format per (method, time) combination
        data_formats = [data_format] * len(methods) * ntimes
        Table.__init__(
            self, subcase_id, location, titles, headers, scalars,
            data_formats=data_formats, nlabels=nlabels,
            labelsize=labelsize, ncolors=ncolors,
            colormap=colormap, set_max_min=set_max_min,
            uname=uname)
        self.methods = methods
        # eids: element ids used to scatter sparse results into a dense array
        self.eids = eids
        self.eid_max = eid_max
        assert len(eids) == scalars.shape[1], f'len(eids)={len(eids)} scalars.shape={scalars.shape}'
        if self.is_complex:
            # one phase entry per (method, time); indexed via _get_j
            self.phases = np.zeros(len(methods) * ntimes)

    def finalize(self):
        """snapshot the current titles/headers as the defaults"""
        self.titles_default = deepcopy(self.titles)
        self.headers_default = deepcopy(self.headers)

    def get_methods(self, i):
        return self.methods

    #def deflects(self, unused_i, unused_res_name):
        #return False

    def get_default_title(self, i, name):
        """legend title"""
        (itime, imethod, unused_header) = name
        return self.methods[imethod]

    def get_title(self, i, name):
        """legend title"""
        (itime, imethod, unused_header) = name
        return self.methods[imethod]

    def get_header(self, i, name):
        """a header shows up in the text"""
        (itime, imethod, header) = name
        return self.methods[imethod] + ': ' + header

    #def get_data_format(self, i, name):
        #return '%.3f'  # TODO: update
    #def get_default_data_format(self, i, name):
        #return '%.3f'  # TODO: update

    def get_scale(self, i, name):
        return None

    def get_default_scale(self, i, name):
        return None

    def get_scalar(self, i, name):
        return self.get_result(i, name)

    def get_magnitude(self, i, name):
        """magnitude of the (possibly complex) result"""
        scalar = self.get_scalar(i, name)  # TODO: update
        mag = scalar
        # BUG FIX: the original only checked 'complex64', so complex128
        # results fell through with a complex "magnitude"
        if scalar.dtype.name in ['complex64', 'complex128']:
            mag = np.sqrt(scalar.real ** 2 + scalar.imag ** 2)
        return mag

    def get_min_max(self, i, name):
        mag = self.get_magnitude(i, name)
        # consistency with LayeredTableResults: guard against all-NaN data,
        # which would make np.nanmin/np.nanmax warn
        if np.any(np.isfinite(mag)):
            return np.nanmin(mag), np.nanmax(mag)
        return np.nan, np.nan

    def get_default_min_max(self, i, name):
        mag = self.get_magnitude(i, name)
        if np.any(np.isfinite(mag)):
            return np.nanmin(mag), np.nanmax(mag)
        return np.nan, np.nan

    def get_phase(self, i, name):
        if self.is_real:
            return None
        j = self._get_j(i, name)
        return self.phases[j]

    def get_result(self, i, name):
        """return a dense (eid_max,) array for the requested time/method"""
        #print(i, name)
        (itime, imethod, unused_header) = name
        scalars = self.scalars[itime, :, imethod]
        if len(scalars) == self.eid_max:
            return scalars
        # sparse result: scatter into a NaN-filled dense array by element id
        data = np.full(self.eid_max, np.nan, dtype=scalars.dtype)
        #print(f'data.shape={data.shape} eids.shape={self.eids.shape} scalars.shape={scalars.shape}')
        #print(self.methods)
        try:
            data[self.eids] = scalars
        except IndexError:
            raise RuntimeError(f'{self.uname!r} eids.max()={self.eids.max()} scalars.shape={scalars.shape}')
        return data

    def _get_j(self, i, name):
        """flat (method, time) index into data_formats/phases"""
        (itime, imethod, unused_header) = name
        ntimes = self.scalars.shape[0]
        j = ntimes * imethod + itime
        return j

    def get_data_format(self, i, name):
        j = self._get_j(i, name)
        try:
            return self.data_formats[j]
        except IndexError:
            print(f'data_formats = {self.data_formats}')
            print(str(self))
            print("ires =", i)
            print(name)
            raise

    def __repr__(self):
        """defines str(self)"""
        msg = 'SimpleTableResults:\n'
        msg += f'    title={self.titles!r}\n'
        msg += f'    subcase_id={self.subcase_id}\n'
        msg += f'    data_type={self.data_type!r}\n'
        msg += f'    is_real={self.is_real} is_complex={self.is_complex}\n'
        msg += f'    location={self.location!r}\n'
        msg += f'    header={self.headers!r}\n'
        msg += f'    methods={self.methods}\n'
        msg += f'    data_format={self.data_formats!r}\n'
        msg += f'    uname={self.uname!r}\n'
        return msg
| 33.464844
| 108
| 0.577332
| 1,096
| 8,567
| 4.364964
| 0.107664
| 0.039716
| 0.050794
| 0.031355
| 0.82337
| 0.815426
| 0.799749
| 0.785535
| 0.754181
| 0.733278
| 0
| 0.002496
| 0.298471
| 8,567
| 255
| 109
| 33.596078
| 0.793511
| 0.10412
| 0
| 0.777778
| 0
| 0.005556
| 0.119861
| 0.091831
| 0
| 0
| 0
| 0.007843
| 0.011111
| 1
| 0.188889
| false
| 0
| 0.016667
| 0.061111
| 0.411111
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4b26cb33e88c6e7c18f854b92d60cc3d7eb8f68
| 64,150
|
py
|
Python
|
sdk/communication/azure-communication-administration/azure/communication/administration/_phonenumber/_generated/aio/operations_async/_phone_number_administration_operations_async.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 1
|
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/communication/azure-communication-administration/azure/communication/administration/_phonenumber/_generated/aio/operations_async/_phone_number_administration_operations_async.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/communication/azure-communication-administration/azure/communication/administration/_phonenumber/_generated/aio/operations_async/_phone_number_administration_operations_async.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
# Generic payload type threaded through the per-operation "cls" callback.
T = TypeVar('T')
# Signature of the optional "cls" keyword each operation accepts: it receives
# the pipeline response, the deserialized body, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PhoneNumberAdministrationOperations:
"""PhoneNumberAdministrationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.communication.administration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration, and (de)serializer handles."""
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
def get_all_phone_numbers(
    self,
    locale: Optional[str] = "en-US",
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.AcquiredPhoneNumbers"]:
    """Gets the list of the acquired phone numbers.

    Gets the list of the acquired phone numbers.

    :param locale: A language-locale pairing which will be used to localize the names of countries.
    :type locale: str
    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AcquiredPhoneNumbers or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.AcquiredPhoneNumbers]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.AcquiredPhoneNumbers"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Builds the request for the first page (templated URL + query params)
        # or for a continuation page (next_link used as the full URL).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_all_phone_numbers.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None-valued options are simply omitted
            query_parameters = {}  # type: Dict[str, Any]
            if locale is not None:
                query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserializes one page and returns (next page link, items in this page).
        deserialized = self._deserialize('AcquiredPhoneNumbers', pipeline_response)
        list_of_elem = deserialized.phone_numbers
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Sends one page request; anything other than HTTP 200 is an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_all_phone_numbers.metadata = {'url': '/administration/phonenumbers/phonenumbers'}  # type: ignore
async def get_all_area_codes(
    self,
    location_type: str,
    country_code: str,
    phone_plan_id: str,
    location_options: Optional[List["models.LocationOptionsQuery"]] = None,
    **kwargs
) -> "models.AreaCodes":
    """Gets a list of the supported area codes.

    Gets a list of the supported area codes.

    :param location_type: The type of location information required by the plan.
    :type location_type: str
    :param country_code: The ISO 3166-2 country code.
    :type country_code: str
    :param phone_plan_id: The plan id from which to search area codes.
    :type phone_plan_id: str
    :param location_options: Represents the underlying list of countries.
    :type location_options: list[~azure.communication.administration.models.LocationOptionsQuery]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AreaCodes, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.AreaCodes
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.AreaCodes"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    # Wrap the optional location options in the request-body model.
    _body = models.LocationOptionsQueries(location_options=location_options)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.get_all_area_codes.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'countryCode': self._serialize.url("country_code", country_code, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['locationType'] = self._serialize.query("location_type", location_type, 'str')
    query_parameters['phonePlanId'] = self._serialize.query("phone_plan_id", phone_plan_id, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = 'application/json'

    body_content_kwargs = {}  # type: Dict[str, Any]
    if _body is not None:
        body_content = self._serialize.body(_body, 'LocationOptionsQueries')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than HTTP 200 is mapped to an exception.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('AreaCodes', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_all_area_codes.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/areacodes'}  # type: ignore
async def get_capabilities_update(
    self,
    capabilities_update_id: str,
    **kwargs
) -> "models.UpdatePhoneNumberCapabilitiesResponse":
    """Get capabilities by capabilities update id.

    Get capabilities by capabilities update id.

    :param capabilities_update_id:
    :type capabilities_update_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: UpdatePhoneNumberCapabilitiesResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.UpdatePhoneNumberCapabilitiesResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.UpdatePhoneNumberCapabilitiesResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    # Construct URL: the update id is a path parameter
    url = self.get_capabilities_update.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'capabilitiesUpdateId': self._serialize.url("capabilities_update_id", capabilities_update_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = 'application/json'

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than HTTP 200 is mapped to an exception.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('UpdatePhoneNumberCapabilitiesResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_capabilities_update.metadata = {'url': '/administration/phonenumbers/capabilities/{capabilitiesUpdateId}'}  # type: ignore
async def update_capabilities(
    self,
    phone_number_capabilities_update: Dict[str, "models.NumberUpdateCapabilities"],
    **kwargs
) -> "models.UpdateNumberCapabilitiesResponse":
    """Adds or removes phone number capabilities.

    Adds or removes phone number capabilities.

    :param phone_number_capabilities_update: The map of phone numbers to the capabilities update
     applied to the phone number.
    :type phone_number_capabilities_update: dict[str, ~azure.communication.administration.models.NumberUpdateCapabilities]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: UpdateNumberCapabilitiesResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.UpdateNumberCapabilitiesResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.UpdateNumberCapabilitiesResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    # Wrap the capabilities map in the request-body model.
    _body = models.UpdateNumberCapabilitiesRequest(phone_number_capabilities_update=phone_number_capabilities_update)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.update_capabilities.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = 'application/json'

    body_content_kwargs = {}  # type: Dict[str, Any]
    if _body is not None:
        body_content = self._serialize.body(_body, 'UpdateNumberCapabilitiesRequest')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than HTTP 200 is mapped to an exception.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('UpdateNumberCapabilitiesResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_capabilities.metadata = {'url': '/administration/phonenumbers/capabilities'}  # type: ignore
def get_all_supported_countries(
    self,
    locale: Optional[str] = "en-US",
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.PhoneNumberCountries"]:
    """Gets a list of supported countries.

    Gets a list of supported countries.

    :param locale: A language-locale pairing which will be used to localize the names of countries.
    :type locale: str
    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PhoneNumberCountries or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberCountries]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberCountries"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Builds the request for the first page (templated URL + query params)
        # or for a continuation page (next_link used as the full URL).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_all_supported_countries.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None-valued options are simply omitted
            query_parameters = {}  # type: Dict[str, Any]
            if locale is not None:
                query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserializes one page and returns (next page link, items in this page).
        deserialized = self._deserialize('PhoneNumberCountries', pipeline_response)
        list_of_elem = deserialized.countries
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Sends one page request; anything other than HTTP 200 is an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_all_supported_countries.metadata = {'url': '/administration/phonenumbers/countries'}  # type: ignore
async def get_number_configuration(
    self,
    phone_number: str,
    **kwargs
) -> "models.NumberConfigurationResponse":
    """Endpoint for getting number configurations.

    Issues a POST with the phone number as the request body and returns the
    deserialized configuration for that number.

    :param phone_number: The phone number in the E.164 format.
    :type phone_number: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: NumberConfigurationResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.NumberConfigurationResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.NumberConfigurationResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    _body = models.NumberConfigurationPhoneNumber(phone_number=phone_number)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.get_number_configuration.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = 'application/json'

    # _body is always constructed above, so the original `if _body is not None`
    # guard was dead code; serialize the payload unconditionally.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(_body, 'NumberConfigurationPhoneNumber')

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('NumberConfigurationResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_number_configuration.metadata = {'url': '/administration/phonenumbers/numberconfiguration'}  # type: ignore
async def configure_number(
    self,
    pstn_configuration: "models.PstnConfiguration",
    phone_number: str,
    **kwargs
) -> None:
    """Endpoint for configuring a pstn number.

    Issues a PATCH carrying the pstn configuration and phone number; returns
    nothing on success.

    :param pstn_configuration: Definition for pstn number configuration.
    :type pstn_configuration: ~azure.communication.administration.models.PstnConfiguration
    :param phone_number: The phone number to configure.
    :type phone_number: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    _body = models.NumberConfiguration(pstn_configuration=pstn_configuration, phone_number=phone_number)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.configure_number.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

    # _body is always constructed above, so the original `if _body is not None`
    # guard was dead code; serialize the payload unconditionally.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(_body, 'NumberConfiguration')

    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
configure_number.metadata = {'url': '/administration/phonenumbers/numberconfiguration/configure'}  # type: ignore
async def unconfigure_number(
    self,
    phone_number: str,
    **kwargs
) -> None:
    """Endpoint for unconfiguring a pstn number by removing the configuration.

    Issues a PATCH carrying the phone number; returns nothing on success.

    :param phone_number: The phone number in the E.164 format.
    :type phone_number: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    _body = models.NumberConfigurationPhoneNumber(phone_number=phone_number)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.unconfigure_number.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

    # _body is always constructed above, so the original `if _body is not None`
    # guard was dead code; serialize the payload unconditionally.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(_body, 'NumberConfigurationPhoneNumber')

    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
unconfigure_number.metadata = {'url': '/administration/phonenumbers/numberconfiguration/unconfigure'}  # type: ignore
def get_phone_plan_groups(
    self,
    country_code: str,
    locale: Optional[str] = "en-US",
    include_rate_information: Optional[bool] = False,
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.PhonePlanGroups"]:
    """Gets a list of phone plan groups for the given country.

    Returns a lazy async pager; no request is made until iteration begins.

    :param country_code: The ISO 3166-2 country code.
    :type country_code: str
    :param locale: A language-locale pairing which will be used to localize the names of countries.
    :type locale: str
    :param include_rate_information: Sent as the ``includeRateInformation`` query parameter.
    :type include_rate_information: bool
    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PhonePlanGroups or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhonePlanGroups]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhonePlanGroups"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page: the metadata URL plus query
        # parameters for the first page, or the service-provided next_link
        # (with no extra query parameters) for subsequent pages.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_phone_plan_groups.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'countryCode': self._serialize.url("country_code", country_code, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None values are omitted from the query string.
            query_parameters = {}  # type: Dict[str, Any]
            if locale is not None:
                query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
            if include_rate_information is not None:
                query_parameters['includeRateInformation'] = self._serialize.query("include_rate_information", include_rate_information, 'bool')
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'countryCode': self._serialize.url("country_code", country_code, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('PhonePlanGroups', pipeline_response)
        list_of_elem = deserialized.phone_plan_groups
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Run one page request through the pipeline; raise on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_phone_plan_groups.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups'}  # type: ignore
def get_phone_plans(
    self,
    country_code: str,
    phone_plan_group_id: str,
    locale: Optional[str] = "en-US",
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.PhonePlansResponse"]:
    """Gets a list of phone plans for a phone plan group.

    Returns a lazy async pager; no request is made until iteration begins.

    :param country_code: The ISO 3166-2 country code.
    :type country_code: str
    :param phone_plan_group_id: Identifier of the phone plan group to list plans for.
    :type phone_plan_group_id: str
    :param locale: A language-locale pairing which will be used to localize the names of countries.
    :type locale: str
    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PhonePlansResponse or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhonePlansResponse]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhonePlansResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page: the metadata URL plus query
        # parameters for the first page, or the service-provided next_link
        # (with no extra query parameters) for subsequent pages.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_phone_plans.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'countryCode': self._serialize.url("country_code", country_code, 'str'),
                'phonePlanGroupId': self._serialize.url("phone_plan_group_id", phone_plan_group_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None values are omitted from the query string.
            query_parameters = {}  # type: Dict[str, Any]
            if locale is not None:
                query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'countryCode': self._serialize.url("country_code", country_code, 'str'),
                'phonePlanGroupId': self._serialize.url("phone_plan_group_id", phone_plan_group_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('PhonePlansResponse', pipeline_response)
        list_of_elem = deserialized.phone_plans
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Run one page request through the pipeline; raise on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_phone_plans.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups/{phonePlanGroupId}/phoneplans'}  # type: ignore
async def get_phone_plan_location_options(
    self,
    country_code: str,
    phone_plan_group_id: str,
    phone_plan_id: str,
    locale: Optional[str] = "en-US",
    **kwargs
) -> "models.LocationOptionsResponse":
    """Gets a list of location options for a phone plan.

    :param country_code: The ISO 3166-2 country code.
    :type country_code: str
    :param phone_plan_group_id: Identifier of the phone plan group.
    :type phone_plan_group_id: str
    :param phone_plan_id: Identifier of the phone plan.
    :type phone_plan_id: str
    :param locale: A language-locale pairing which will be used to localize the names of countries.
    :type locale: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: LocationOptionsResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.LocationOptionsResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.LocationOptionsResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    # Expand the URL template with the endpoint and the three path parameters.
    url = self._client.format_url(
        self.get_phone_plan_location_options.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        countryCode=self._serialize.url("country_code", country_code, 'str'),
        phonePlanGroupId=self._serialize.url("phone_plan_group_id", phone_plan_group_id, 'str'),
        phonePlanId=self._serialize.url("phone_plan_id", phone_plan_id, 'str'),
    )

    # Query string: optional locale plus the mandatory api-version.
    query_parameters = {}  # type: Dict[str, Any]
    if locale is not None:
        query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': 'application/json'}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('LocationOptionsResponse', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_phone_plan_location_options.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups/{phonePlanGroupId}/phoneplans/{phonePlanId}/locationoptions'}  # type: ignore
async def get_release_by_id(
    self,
    release_id: str,
    **kwargs
) -> "models.PhoneNumberRelease":
    """Gets a release by a release id.

    :param release_id: Represents the release id.
    :type release_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PhoneNumberRelease, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.PhoneNumberRelease
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberRelease"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    # Expand the URL template with the endpoint and release id.
    url = self._client.format_url(
        self.get_release_by_id.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        releaseId=self._serialize.url("release_id", release_id, 'str'),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {'Accept': 'application/json'}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('PhoneNumberRelease', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_release_by_id.metadata = {'url': '/administration/phonenumbers/releases/{releaseId}'}  # type: ignore
async def release_phone_numbers(
    self,
    phone_numbers: List[str],
    **kwargs
) -> "models.ReleaseResponse":
    """Creates a release for the given phone numbers.

    Issues a POST carrying the list of numbers and returns the created
    release descriptor.

    :param phone_numbers: The list of phone numbers in the release request.
    :type phone_numbers: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ReleaseResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.ReleaseResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ReleaseResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    _body = models.ReleaseRequest(phone_numbers=phone_numbers)
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.release_phone_numbers.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = 'application/json'

    # _body is always constructed above, so the original `if _body is not None`
    # guard was dead code; serialize the payload unconditionally.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(_body, 'ReleaseRequest')

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('ReleaseResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
release_phone_numbers.metadata = {'url': '/administration/phonenumbers/releases'}  # type: ignore
def get_all_releases(
    self,
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.PhoneNumberEntities"]:
    """Gets a list of all releases.

    Returns a lazy async pager; no request is made until iteration begins.

    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PhoneNumberEntities or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberEntities]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberEntities"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page: the metadata URL plus query
        # parameters for the first page, or the service-provided next_link
        # (with no extra query parameters) for subsequent pages.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_all_releases.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None values are omitted from the query string.
            query_parameters = {}  # type: Dict[str, Any]
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('PhoneNumberEntities', pipeline_response)
        list_of_elem = deserialized.entities
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Run one page request through the pipeline; raise on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_all_releases.metadata = {'url': '/administration/phonenumbers/releases'}  # type: ignore
async def get_search_by_id(
    self,
    search_id: str,
    **kwargs
) -> "models.PhoneNumberReservation":
    """Get search by search id.

    :param search_id: The search id to be searched for.
    :type search_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PhoneNumberReservation, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.PhoneNumberReservation
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberReservation"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    # Expand the URL template with the endpoint and search id.
    url = self._client.format_url(
        self.get_search_by_id.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        searchId=self._serialize.url("search_id", search_id, 'str'),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {'Accept': 'application/json'}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('PhoneNumberReservation', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_search_by_id.metadata = {'url': '/administration/phonenumbers/searches/{searchId}'}  # type: ignore
async def create_search(
    self,
    body: Optional["models.CreateSearchOptions"] = None,
    **kwargs
) -> "models.CreateSearchResponse":
    """Creates a phone number search.

    :param body: Defines the search options.
    :type body: ~azure.communication.administration.models.CreateSearchOptions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CreateSearchResponse, or the result of cls(response)
    :rtype: ~azure.communication.administration.models.CreateSearchResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CreateSearchResponse"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"
    content_type = kwargs.pop("content_type", "application/json")

    # Expand the URL template with the endpoint.
    url = self._client.format_url(
        self.create_search.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': 'application/json',
    }  # type: Dict[str, Any]

    # The request body is optional; serialize it only when supplied.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if body is None:
        body_content_kwargs['content'] = None
    else:
        body_content_kwargs['content'] = self._serialize.body(body, 'CreateSearchOptions')

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Service responds 201 Created on success.
    if response.status_code != 201:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('CreateSearchResponse', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
create_search.metadata = {'url': '/administration/phonenumbers/searches'}  # type: ignore
def get_all_searches(
    self,
    skip: Optional[int] = 0,
    take: Optional[int] = 100,
    **kwargs
) -> AsyncIterable["models.PhoneNumberEntities"]:
    """Gets a list of all searches.

    Returns a lazy async pager; no request is made until iteration begins.

    :param skip: An optional parameter for how many entries to skip, for pagination purposes.
    :type skip: int
    :param take: An optional parameter for how many entries to return, for pagination purposes.
    :type take: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PhoneNumberEntities or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberEntities]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberEntities"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-20-preview1"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page: the metadata URL plus query
        # parameters for the first page, or the service-provided next_link
        # (with no extra query parameters) for subsequent pages.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.get_all_searches.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; None values are omitted from the query string.
            query_parameters = {}  # type: Dict[str, Any]
            if skip is not None:
                query_parameters['skip'] = self._serialize.query("skip", skip, 'int')
            if take is not None:
                query_parameters['take'] = self._serialize.query("take", take, 'int')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('PhoneNumberEntities', pipeline_response)
        list_of_elem = deserialized.entities
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Run one page request through the pipeline; raise on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_all_searches.metadata = {'url': '/administration/phonenumbers/searches'}  # type: ignore
async def cancel_search(
self,
search_id: str,
**kwargs
) -> None:
"""Cancels the search. This means existing numbers in the search will be made available.
Cancels the search. This means existing numbers in the search will be made available.
:param search_id: The search id to be canceled.
:type search_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.cancel_search.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'searchId': self._serialize.url("search_id", search_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
cancel_search.metadata = {'url': '/administration/phonenumbers/searches/{searchId}/cancel'} # type: ignore
async def purchase_search(
self,
search_id: str,
**kwargs
) -> None:
"""Purchases the phone number search.
Purchases the phone number search.
:param search_id: The search id to be purchased.
:type search_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.purchase_search.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'searchId': self._serialize.url("search_id", search_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purchase_search.metadata = {'url': '/administration/phonenumbers/searches/{searchId}/purchase'} # type: ignore
| 47.695167
| 202
| 0.653141
| 6,976
| 64,150
| 5.800315
| 0.043005
| 0.018783
| 0.012851
| 0.017646
| 0.868917
| 0.846502
| 0.822332
| 0.801868
| 0.794553
| 0.78788
| 0
| 0.00814
| 0.247342
| 64,150
| 1,344
| 203
| 47.730655
| 0.829899
| 0.138987
| 0
| 0.804612
| 0
| 0
| 0.118789
| 0.051604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015777
| false
| 0
| 0.008495
| 0
| 0.082524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4c0fd4086cdcb34b8fbedd185493f36fe409fc6
| 138
|
py
|
Python
|
plugins/twitter/komand_twitter/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/twitter/komand_twitter/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/twitter/komand_twitter/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .block.action import Block
from .destroy.action import Destroy
from .post.action import Post
| 27.6
| 39
| 0.789855
| 22
| 138
| 4.954545
| 0.590909
| 0.330275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 138
| 4
| 40
| 34.5
| 0.931624
| 0.268116
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d4cb722ba0f4bb7cb004248955ecf49c9c9c5fa8
| 90
|
py
|
Python
|
wombat_db/engine/__init__.py
|
TomScheffers/wombat
|
c70adf924a04af0513f392cd01613e4597c1116a
|
[
"Apache-2.0"
] | 7
|
2021-03-25T17:21:54.000Z
|
2021-11-24T18:59:45.000Z
|
wombat_db/engine/__init__.py
|
TomScheffers/wombat
|
c70adf924a04af0513f392cd01613e4597c1116a
|
[
"Apache-2.0"
] | 1
|
2021-04-22T09:33:09.000Z
|
2021-04-22T10:28:32.000Z
|
wombat_db/engine/__init__.py
|
TomScheffers/wombat
|
c70adf924a04af0513f392cd01613e4597c1116a
|
[
"Apache-2.0"
] | null | null | null |
from wombat_db.engine.engine import Engine
from wombat_db.engine.udf import json_to_struct
| 45
| 47
| 0.877778
| 16
| 90
| 4.6875
| 0.5625
| 0.266667
| 0.32
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077778
| 90
| 2
| 47
| 45
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d4e2bdaf303aa57cf34d81fd6b728d3000675c4a
| 178
|
py
|
Python
|
artefact_nca/config/config.py
|
Ackey-code/3d-artefacts-nca
|
b13228d5dd30519ad885d2400061be2adf6cfc3c
|
[
"MIT"
] | 37
|
2021-05-26T03:41:07.000Z
|
2022-02-03T21:24:26.000Z
|
artefact_nca/config/config.py
|
Ackey-code/3d-artefacts-nca
|
b13228d5dd30519ad885d2400061be2adf6cfc3c
|
[
"MIT"
] | 1
|
2021-12-01T21:43:33.000Z
|
2021-12-01T21:43:33.000Z
|
artefact_nca/config/config.py
|
Ackey-code/3d-artefacts-nca
|
b13228d5dd30519ad885d2400061be2adf6cfc3c
|
[
"MIT"
] | 4
|
2021-06-07T17:29:13.000Z
|
2021-12-18T16:30:50.000Z
|
"""
Import your main config files here:
"""
from artefact_nca.config.base import DefaultConfig # noqa
from artefact_nca.config.voxel_nca_config import VoxelCAConfig # noqa
| 29.666667
| 70
| 0.780899
| 24
| 178
| 5.625
| 0.583333
| 0.2
| 0.222222
| 0.311111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 178
| 5
| 71
| 35.6
| 0.888158
| 0.258427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
076cd49d8d0c93267ea8b507c445806a443c0339
| 13,727
|
py
|
Python
|
FADiff/test_main.py
|
teamxvii/cs107-FinalProject
|
68c2a964bc993bc748da658929293699d51d8f4f
|
[
"MIT"
] | null | null | null |
FADiff/test_main.py
|
teamxvii/cs107-FinalProject
|
68c2a964bc993bc748da658929293699d51d8f4f
|
[
"MIT"
] | 3
|
2020-12-11T18:18:12.000Z
|
2020-12-12T15:19:18.000Z
|
FADiff/test_main.py
|
teamxvii/cs107FinalProject
|
68c2a964bc993bc748da658929293699d51d8f4f
|
[
"MIT"
] | 2
|
2020-12-11T22:39:53.000Z
|
2020-12-12T01:22:12.000Z
|
import pytest
import coverage
from FADiff import FADiff
import Elems
import numpy as np
from FuncVect import FuncVect
class TestClass:
FADiff.set_mode('forward')
def test_neg(self):
x = FADiff.new_scal(3)
assert -x.val == -3
assert -x.der == -1
def test_add(self):
x = FADiff.new_scal(3) + 5
assert x.val == 8
assert x.der == 1
y = FADiff.new_scal(3) + FADiff.new_scal(5)
assert y.val == 8
def test_radd(self):
x = 5 + FADiff.new_scal(3)
assert x.val == 8
assert x.der == 1
def test_sub(self):
x = FADiff.new_scal(3) - 5
assert x.val == -2
assert x.der == 1
y = FADiff.new_scal(3) - FADiff.new_scal(2)
assert y.val == 1
assert x.der == 1
def test_rsub(self):
x = 3 - FADiff.new_scal(3)
assert x.val == 0
assert x.der == 2
def test_mul(self):
x = FADiff.new_scal(3) * 3
assert x.val == 9
assert x.der == 3
y = FADiff.new_scal(3) * FADiff.new_scal(4)
assert y.val == 12
# assert y.der == 7
def test_rmul(self):
x = 3 * FADiff.new_scal(3)
assert x.val == 9
assert x.der == 3
def test_div(self):
x = FADiff.new_scal(3) / 3
assert x.val == 1
assert x.der == pytest.approx(0.3333333333333333)
y = FADiff.new_scal(3) / FADiff.new_scal(4)
assert y.val == pytest.approx(0.75)
# assert y.der == pytest.approx(0.0625)
def test_rdiv(self):
x = 3 / FADiff.new_scal(3)
assert x.val == 1
assert x.der == pytest.approx(-0.3333333333333333)
def test_pow(self):
x = FADiff.new_scal(3) ** 2
assert x.val == 9
assert x.der == 6
y = FADiff.new_scal(3) ** FADiff.new_scal(5)
assert y.val == 243
assert y.der[0] == 405
def test_rpow(self):
x = 2 ** FADiff.new_scal(3)
assert x.val == 8
assert x.der == pytest.approx(5.54517744)
assert FADiff._mode == 'forward'
FADiff.set_mode('reverse')
assert FADiff._mode == 'reverse'
def test_neg_reverse(self):
x = FADiff.new_scal(3)
assert -x.val == -3
assert -x.der == -1
def test_add_reverse(self):
x = FADiff.new_scal(3) + 5
assert x.val == 8
assert x.der == 1
y = FADiff.new_scal(3) + FADiff.new_scal(5)
assert y.val == 8
def test_radd_reverse(self):
x = 5 + FADiff.new_scal(3)
assert x.val == 8
assert x.der == 1
def test_sub_reverse(self):
x = FADiff.new_scal(3) - 5
assert x.val == -2
assert x.der == 1
y = FADiff.new_scal(3) - FADiff.new_scal(2)
assert y.val == 1
assert x.der == 1
def test_rsub_reverse(self):
x = 3 - FADiff.new_scal(3)
assert x.val == 0
assert x.der == 2
def test_mul_reverse(self):
x = FADiff.new_scal(3) * 3
assert x.val == 9
assert x.der == 3
y = FADiff.new_scal(3) * FADiff.new_scal(4)
assert y.val == 12
# assert y.der == 7
def test_rmul_reverse(self):
x = 3 * FADiff.new_scal(3)
assert x.val == 9
assert x.der == 3
def test_div_reverse(self):
x = FADiff.new_scal(3) / 3
assert x.val == 1
assert x.der == pytest.approx(0.3333333333333333)
y = FADiff.new_scal(3) / FADiff.new_scal(4)
assert y.val == pytest.approx(0.75)
# assert y.der == pytest.approx(0.0625)
def test_rdiv_reverse(self):
x = 3 / FADiff.new_scal(3)
assert x.val == 1
assert x.der == pytest.approx(-0.3333333333333333)
def test_pow_reverse(self):
x = FADiff.new_scal(3) ** 2
assert x.val == 9
assert x.der == 6
y = FADiff.new_scal(3) ** FADiff.new_scal(5)
assert y.val == 243
assert y.der[0] == 405
def test_rpow_reverse(self):
x = 2 ** FADiff.new_scal(3)
assert x.val == 8
assert x.der == pytest.approx(5.54517744)
assert FADiff._mode == 'reverse'
FADiff.set_mode('forward')
assert FADiff._mode == 'forward'
# Elems testing
def test_exp(self):
x = Elems.exp(FADiff.new_scal(3))
assert x.val == pytest.approx(20.085536923187668)
assert x.der == pytest.approx(20.085536923187668)
y = 10
assert Elems.exp(y) == np.exp(y)
def test_exp_reverse(self):
FADiff.set_mode('reverse')
x = Elems.exp(FADiff.new_scal(3))
assert x.val == pytest.approx(20.085536923187668)
assert x.der == pytest.approx(20.085536923187668)
y = 10
assert Elems.exp(y) == np.exp(y)
def test_cos(self):
FADiff.set_mode('forward')
x = Elems.cos(FADiff.new_scal(3))
assert x.val == pytest.approx(-0.9899924966004454)
assert x.der == pytest.approx(-0.1411200080598672)
y = 2
assert Elems.cos(y) == np.cos(y)
def test_cos_reverse(self):
FADiff.set_mode('reverse')
x = Elems.cos(FADiff.new_scal(3))
assert x.val == pytest.approx(-0.9899924966004454)
assert x.der == pytest.approx(-0.1411200080598672)
y = 2
assert Elems.cos(y) == np.cos(y)
def test_sin(self):
x = FADiff()
x.set_mode('forward')
x = x.new_scal(3)
a = Elems.sin(x)
assert a.val == pytest.approx(0.1411200080598672)
assert a.der == pytest.approx(-0.9899924966004454)
y = 2
assert Elems.sin(y) == np.sin(y)
def test_sin_reverse(self):
x = FADiff()
x.set_mode('reverse')
x = x.new_scal(3)
a = Elems.sin(x)
assert a.val == pytest.approx(0.1411200080598672)
assert a.der == pytest.approx(-0.9899924966004454)
y = 2
assert Elems.sin(y) == np.sin(y)
def test_tan(self):
FADiff.set_mode('forward')
x = Elems.tan(FADiff.new_scal(3))
assert x.val == pytest.approx(-0.1425465430742778)
assert x.der == pytest.approx(1.020319516942427)
y = 2
assert Elems.tan(y) == np.tan(y)
def test_tan_reverse(self):
FADiff.set_mode('reverse')
x = Elems.tan(FADiff.new_scal(3))
assert x.val == pytest.approx(-0.1425465430742778)
assert x.der == pytest.approx(1.020319516942427)
y = 2
assert Elems.tan(y) == np.tan(y)
def test_arcsin(self):
FADiff.set_mode('forward')
x = Elems.arcsin(FADiff.new_scal(0.3))
assert x.val == pytest.approx(0.30469265)
with pytest.warns(RuntimeWarning):
Elems.arcsin(-19)
y = -0.4
assert Elems.arcsin(y) == np.arcsin(y)
def test_arcsin_reverse(self):
FADiff.set_mode('reverse')
x = Elems.arcsin(FADiff.new_scal(0.3))
assert x.val == pytest.approx(0.30469265)
with pytest.warns(RuntimeWarning):
Elems.arcsin(-19)
y = -0.4
assert Elems.arcsin(y) == np.arcsin(y)
def test_arccos(self):
FADiff.set_mode('forward')
x = Elems.arccos(FADiff.new_scal(0.3))
assert x.val == pytest.approx(1.2661036727794992)
with pytest.warns(RuntimeWarning):
Elems.arccos(19)
y = -0.4
assert Elems.arccos(y) == np.arccos(y)
def test_arccos_reverse(self):
FADiff.set_mode('reverse')
x = Elems.arccos(FADiff.new_scal(0.3))
assert x.val == pytest.approx(1.2661036727794992)
with pytest.warns(RuntimeWarning):
Elems.arccos(19)
y = -0.4
assert Elems.arccos(y) == np.arccos(y)
def test_arctan(self):
FADiff.set_mode('forward')
x = Elems.arctan(FADiff.new_scal(0.5))
assert x.val == pytest.approx(0.4636476090008061)
y = -0.4
assert Elems.arctan(y) == np.arctan(y)
def test_arctan_reverse(self):
FADiff.set_mode('reverse')
x = Elems.arctan(FADiff.new_scal(0.5))
assert x.val == pytest.approx(0.4636476090008061)
y = -0.4
assert Elems.arctan(y) == np.arctan(y)
def test_sinh(self):
FADiff.set_mode('forward')
x = Elems.sinh(FADiff.new_scal(0.4))
assert x.val == pytest.approx(0.4107523258028155)
y = -0.4
assert Elems.sinh(y) == np.sinh(y)
def test_sinh_reverse(self):
FADiff.set_mode('reverse')
x = Elems.sinh(FADiff.new_scal(0.4))
assert x.val == pytest.approx(0.4107523258028155)
y = -0.4
assert Elems.sinh(y) == np.sinh(y)
def test_cosh(self):
FADiff.set_mode('forward')
x = Elems.cosh(FADiff.new_scal(0.3))
assert x.val == pytest.approx(1.04533851)
assert x.der == pytest.approx(0.30452029)
y = 4
assert Elems.cosh(y) == np.cosh(y)
def test_cosh_reverse(self):
FADiff.set_mode('reverse')
x = Elems.cosh(FADiff.new_scal(0.3))
assert x.val == pytest.approx(1.04533851)
assert x.der == pytest.approx(0.30452029)
y = 4
assert Elems.cosh(y) == np.cosh(y)
def test_tanh(self):
FADiff.set_mode('forward')
x = Elems.tanh(FADiff.new_scal(1))
assert x.val == pytest.approx(0.7615941559557649)
y = 2
assert Elems.tanh(y) == np.tanh(y)
def test_tanh_reverse(self):
FADiff.set_mode('reverse')
x = Elems.tanh(FADiff.new_scal(1))
assert x.val == pytest.approx(0.7615941559557649)
y = 2
assert Elems.tanh(y) == np.tanh(y)
def test_log(self):
FADiff.set_mode('forward')
x = Elems.log(FADiff.new_scal(0.3))
assert x.val == pytest.approx(-1.2039728)
y = 2
assert Elems.log(y) == pytest.approx(np.log(y) / np.log(np.e))
def test_log_reverse(self):
FADiff.set_mode('reverse')
x = Elems.log(FADiff.new_scal(0.3))
assert x.val == pytest.approx(-1.2039728)
y = 2
assert Elems.log(y) == pytest.approx(np.log(y) / np.log(np.e))
def test_logistic(self):
FADiff.set_mode('forward')
x = FADiff()
x = x.new_scal(2)
x = Elems.logistic(x)
assert x.val == pytest.approx(0.8807970779778823)
y = 4
assert Elems.logistic(y) == pytest.approx(0.9820137900379085)
def test_logistic_reverse(self):
FADiff.set_mode('reverse')
x = FADiff()
x = x.new_scal(2)
x = Elems.logistic(x)
assert x.val == pytest.approx(0.8807970779778823)
y = 4
assert Elems.logistic(y) == pytest.approx(0.9820137900379085)
def test_sqrt(self):
FADiff.set_mode('forward')
x = Elems.sqrt(FADiff.new_scal(3))
assert x.val == pytest.approx(1.7320508075688772)
y = 2
assert Elems.sqrt(y) == np.sqrt(y)
z = Elems.sqrt(FADiff.new_scal(-1))
with pytest.raises(AssertionError):
assert z.val == 1
def test_sqrt_reverse(self):
FADiff.set_mode('reverse')
x = Elems.sqrt(FADiff.new_scal(3))
assert x.val == pytest.approx(1.7320508075688772)
y = 2
assert Elems.sqrt(y) == np.sqrt(y)
z = Elems.sqrt(FADiff.new_scal(-1))
with pytest.raises(AssertionError):
assert z.val == 1
# FADiff class
def test_mode(self):
x = FADiff()
x.set_mode('forward')
assert x._mode == 'forward'
x.new_scal(3)
assert x._mode == 'forward'
x.set_mode('reverse')
assert x._mode == 'reverse'
x.new_scal(4)
x.set_mode('testing')
assert x._mode != 'forward' or x._mode != 'reverse'
y = FADiff()
y.set_mode('forward')
assert y._mode == 'forward'
y = y.new_vect(np.array([2, 3, 4]))
assert y.der is not None
z = FADiff()
z.set_mode('reverse')
z = z.new_vect(np.array([1, 2, 3]))
assert FADiff._mode == 'reverse'
# FuncVect class
def test_funcvect(self):
# forward mode scalar tests
x = FADiff()
x.set_mode('forward')
x = FADiff.new_scal(3)
y = FADiff.new_scal(2)
f1 = x * y + x
assert f1.val == 9
f2 = 8 * y
assert f2.val == 16
f = FADiff.new_funcvect([f1, f2])
assert f.val.tolist() == [9, 16]
assert f.der.tolist() == [[3, 3], [0, 8]]
# forward mode vector tests
x1 = FADiff.new_vect(np.array([2, 3, 4]))
f3 = x1 * x1
f4 = x1 * 8
ff = FADiff.new_funcvect([f3, f4])
assert ff.val.tolist() == [[4, 9, 16], [16, 24, 32]]
assert ff.der.tolist() == [[4, 6, 8], [8, 8, 8]]
# test fucntions of dif types
with pytest.raises(Exception):
ff = FADiff.new_funcvect([f3, 17])
# reverse mode scalar tests
xr = FADiff()
xr.set_mode('reverse')
xr = FADiff.new_scal(3)
yr = FADiff.new_scal(2)
f1r = xr * yr + xr
assert f1r.val == 9
f2r = 8 * yr
assert f2r.val == 16
fr = FADiff.new_funcvect([f1r, f2r])
assert fr.val.tolist() == [9, 16]
assert fr.der.tolist() == [[3, 3], [0, 8]]
# reverse mode vector tests
x1r = FADiff.new_vect(np.array([2, 3, 4]))
f3r = x1r * x1r
f4r = x1r * 8
ffr = FADiff.new_funcvect([f3r, f4r])
assert ffr.val.tolist() == [[4, 9, 16], [16, 24, 32]]
assert ffr.der.tolist() == [[4, 6, 8], [8, 8, 8]]
# test fucntions of dif types
with pytest.raises(Exception):
ffr = FADiff.new_funcvect([f3r, 17])
| 27.731313
| 70
| 0.552633
| 1,970
| 13,727
| 3.746193
| 0.066497
| 0.077778
| 0.123306
| 0.079675
| 0.858808
| 0.831978
| 0.822629
| 0.771545
| 0.719106
| 0.712331
| 0
| 0.10533
| 0.30837
| 13,727
| 494
| 71
| 27.787449
| 0.672003
| 0.022802
| 0
| 0.723118
| 0
| 0
| 0.024032
| 0
| 0
| 0
| 0
| 0
| 0.395161
| 1
| 0.134409
| false
| 0
| 0.016129
| 0
| 0.153226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
076d3a1d7530b15fa3fa51defdbc2bd22e129c91
| 68
|
py
|
Python
|
examples/modules/target_visualization.py
|
snehankekre/streamlit-yellowbrick
|
fd94bf4554966390ee578831612350d613aa3de7
|
[
"MIT"
] | 7
|
2021-06-08T10:24:19.000Z
|
2022-02-02T11:57:56.000Z
|
examples/modules/target_visualization.py
|
snehankekre/streamlit-yellowbrick
|
fd94bf4554966390ee578831612350d613aa3de7
|
[
"MIT"
] | null | null | null |
examples/modules/target_visualization.py
|
snehankekre/streamlit-yellowbrick
|
fd94bf4554966390ee578831612350d613aa3de7
|
[
"MIT"
] | null | null | null |
import streamlit as st
def run_target_visualization():
return
| 11.333333
| 31
| 0.764706
| 9
| 68
| 5.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191176
| 68
| 5
| 32
| 13.6
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
0791616213cbe80f61b7ce246d522b32b3897609
| 121
|
py
|
Python
|
fieldosophy/misc/__init__.py
|
andyGFHill/fieldosophy
|
8677048d56b382a45a80383fe8ff84d75a5f9760
|
[
"BSD-3-Clause"
] | 3
|
2021-05-03T10:07:08.000Z
|
2022-03-17T19:24:28.000Z
|
fieldosophy/misc/__init__.py
|
andyGFHill/fieldosophy
|
8677048d56b382a45a80383fe8ff84d75a5f9760
|
[
"BSD-3-Clause"
] | null | null | null |
fieldosophy/misc/__init__.py
|
andyGFHill/fieldosophy
|
8677048d56b382a45a80383fe8ff84d75a5f9760
|
[
"BSD-3-Clause"
] | 1
|
2022-01-27T11:49:02.000Z
|
2022-01-27T11:49:02.000Z
|
from . import Cheb
from . import misc_spatiotemporal
from . import misc_functions
from . import misc_templateMatching
| 15.125
| 35
| 0.809917
| 15
| 121
| 6.333333
| 0.466667
| 0.421053
| 0.442105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157025
| 121
| 7
| 36
| 17.285714
| 0.931373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
07b2e095a349a72541aaba9e63ce5aee9d63fd48
| 43
|
py
|
Python
|
python/condition/__init__.py
|
letitgone/python-crash-course
|
2551f2977f2814393fc4f44b8e2f20bdcc20141b
|
[
"MIT"
] | null | null | null |
python/condition/__init__.py
|
letitgone/python-crash-course
|
2551f2977f2814393fc4f44b8e2f20bdcc20141b
|
[
"MIT"
] | null | null | null |
python/condition/__init__.py
|
letitgone/python-crash-course
|
2551f2977f2814393fc4f44b8e2f20bdcc20141b
|
[
"MIT"
] | null | null | null |
# @Author ZhangGJ
# @Date 2020/12/24 08:04
| 14.333333
| 24
| 0.674419
| 8
| 43
| 3.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.162791
| 43
| 2
| 25
| 21.5
| 0.472222
| 0.883721
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07baf2b25ddb771e9e035058b65eb72b7c1fb85d
| 9,506
|
py
|
Python
|
molsysmt/forms/classes/api_openmm_GromacsGroFile.py
|
uibcdf/MolSysMT
|
9866a6fb090df9fff36af113a45164da4b674c09
|
[
"MIT"
] | 3
|
2020-06-02T03:55:52.000Z
|
2022-03-21T04:43:52.000Z
|
molsysmt/forms/classes/api_openmm_GromacsGroFile.py
|
uibcdf/MolSysMT
|
9866a6fb090df9fff36af113a45164da4b674c09
|
[
"MIT"
] | 28
|
2020-06-24T00:55:53.000Z
|
2021-07-16T22:09:19.000Z
|
molsysmt/forms/classes/api_openmm_GromacsGroFile.py
|
uibcdf/MolSysMT
|
9866a6fb090df9fff36af113a45164da4b674c09
|
[
"MIT"
] | 1
|
2021-06-17T18:55:25.000Z
|
2021-06-17T18:55:25.000Z
|
from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
import numpy as np
from simtk.openmm.app import GromacsGroFile as _openmm_GromacsGroFile
import importlib
import sys
from molsysmt import puw
from molsysmt.molecular_system import molecular_system_components
form_name='openmm.GromacsGroFile'
is_form={
_openmm_GromacsGroFile : form_name,
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['elements', 'coordinates', 'box']:
has[ii]=True
def to_openmm_GromacsGroFile(item, molecular_system=None, atom_indices='all', frame_indices='all', copy_if_all=True):
tmp_molecular_system = None
if (atom_indices is 'all') and (frame_indices is 'all'):
if copy_if_all:
tmp_item = extract_item(item)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract_item(item, atom_indices=atom_indices, frame_indices=frame_indices)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def extract_item(item, atom_indices='all', frame_indices='all'):
if (atom_indices is 'all') and (frame_indices is 'all'):
raise NotImplementedError()
else:
raise NotImplementedError()
return tmp_item
def add(item, from_item, atom_indices='all', frame_indices='all'):
raise NotImplementedError
def append_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
##### Set
def aux_get(item, indices='all', frame_indices='all'):
raise NotImplementedError
## Atom
def get_atom_index_from_atom(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_atom_id_from_atom(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_atom_name_from_atom(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_atom_type_from_atom(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_group_index_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_component_index_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_chain_index_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_molecule_index_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_entity_index_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_inner_bonded_atoms_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_inner_bonds_from_atom (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_coordinates_from_atom(item, indices='all', frame_indices='all'):
xyz = item.getPositions(asNumpy=True)
unit = puw.get_unit(xyz)
xyz = np.expand_dims(xyz, axis=0)
if frame_indices is not 'all':
xyz = xyz[frame_indices, :, :]
if indices is not 'all':
xyz = xyz[:, indices, :]
xyz = puw.standardize(xyz*unit)
return xyz
def get_frame_from_atom(item, indices='all', frame_indices='all'):
tmp_step = get_step_from_system(item, frame_indices=frame_indices)
tmp_time = get_time_from_system(item, frame_indices=frame_indices)
tmp_box = get_box_from_system(item, frame_indices=frame_indices)
tmp_coordinates = get_coordinates_from_atom(item, indices=indices, frame_indices=frame_indices)
return tmp_step, tmp_time, tmp_coordinates, tmp_box
## group
def get_group_id_from_group(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_group_name_from_group(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_group_type_from_group(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
## component
def get_component_id_from_component (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_component_name_from_component (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_component_type_from_component (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
## molecule
def get_molecule_id_from_molecule (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_molecule_name_from_molecule (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_molecule_type_from_molecule (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
## chain
def get_chain_id_from_chain (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_chain_name_from_chain (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_chain_type_from_chain (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
## entity
def get_entity_id_from_entity (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_entity_name_from_entity (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_entity_type_from_entity (item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
## system
def get_n_atoms_from_system(item, indices='all', frame_indices='all'):
return len(item.atomNames)
def get_n_groups_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_components_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_chains_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_molecules_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_entities_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_n_bonds_from_system(item, indices='all', frame_indices='all'):
return aux_get(item, indices=indices, frame_indices=frame_indices)
def get_box_from_system(item, indices='all', frame_indices='all'):
box = item.getPeriodicBoxVectors()
unit = puw.get_unit(box)
box = np.expand_dims(puw.get_value(box), axis=0)
box = puw.standardize(box*unit)
return box
def get_box_shape_from_system(item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_box_lengths_from_system(item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_box_angles_from_system(item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_box_volume_from_system(item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_time_from_system(item, indices='all', frame_indices='all'):
    """This item form carries no time information."""
    return None

def get_step_from_system(item, indices='all', frame_indices='all'):
    """This item form carries no step information."""
    return None
def get_n_frames_from_system(item, indices='all', frame_indices='all'):
    """Return the number of frames selected from the item.

    With frame_indices='all' the item's own frame count is reported;
    otherwise the length of the selection is returned.
    """
    # BUG FIX: the original compared with `frame_indices is 'all'`. Identity
    # comparison against a string literal relies on CPython interning and emits
    # a SyntaxWarning since Python 3.8; use an equality check instead. The
    # isinstance guard keeps array-like selections out of the `==` comparison.
    if isinstance(frame_indices, str) and frame_indices == 'all':
        return item.getNumFrames()
    return len(frame_indices)
def get_bonded_atoms_from_system(item, indices='all', frame_indices='all'):
    """Bonded atom pairs; resolved by the shared aux_get helper."""
    return aux_get(item, frame_indices=frame_indices, indices=indices)

## bond

def get_bond_order_from_bond(item, indices='all', frame_indices='all'):
    """Bond order(s); resolved by the shared aux_get helper."""
    return aux_get(item, frame_indices=frame_indices, indices=indices)

def get_bond_type_from_bond(item, indices='all', frame_indices='all'):
    """Bond type(s); resolved by the shared aux_get helper."""
    return aux_get(item, frame_indices=frame_indices, indices=indices)

def get_atom_index_from_bond(item, indices='all', frame_indices='all'):
    """Atom indices of each bond; resolved by the shared aux_get helper."""
    return aux_get(item, frame_indices=frame_indices, indices=indices)
###### Set
def set_box_to_system(item, indices='all', frame_indices='all', value=None):
    """Setting the box is not supported for this item form."""
    raise NotImplementedError

def set_coordinates_to_system(item, indices='all', frame_indices='all', value=None):
    """Setting coordinates is not supported for this item form."""
    raise NotImplementedError
| 31.065359
| 136
| 0.759941
| 1,360
| 9,506
| 4.974265
| 0.073529
| 0.253659
| 0.227494
| 0.172358
| 0.788027
| 0.774871
| 0.759202
| 0.744272
| 0.688101
| 0.67391
| 0
| 0.000242
| 0.130234
| 9,506
| 305
| 137
| 31.167213
| 0.817973
| 0.006522
| 0
| 0.360248
| 0
| 0
| 0.040574
| 0.00223
| 0
| 0
| 0
| 0
| 0
| 1
| 0.335404
| false
| 0
| 0.049689
| 0.242236
| 0.670807
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
07cbd695d95b73c8c1a1b4cab4d2f5cb5b025041
| 6,685
|
py
|
Python
|
tests/testbokehwidgets.py
|
stonebig/holoviews
|
d5270c30dd1af38a785452aeac2fbabbe528e892
|
[
"BSD-3-Clause"
] | 2
|
2020-08-13T00:11:46.000Z
|
2021-01-31T22:13:21.000Z
|
tests/testbokehwidgets.py
|
adsbxchange/holoviews
|
7c06dbd63945fd66e63b17060956634be3ba17fe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/testbokehwidgets.py
|
adsbxchange/holoviews
|
7c06dbd63945fd66e63b17060956634be3ba17fe
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import SkipTest
import numpy as np
from holoviews.core import Dimension, NdMapping
from holoviews.element.comparison import ComparisonTestCase
try:
    from holoviews.plotting.bokeh.widgets import BokehServerWidgets
    from bokeh.models.widgets import Select, Slider, AutocompleteInput, TextInput, Div
except ImportError:
    # Optional dependency: signal the missing bokeh backend with a None
    # sentinel so the test case below can skip itself instead of the whole
    # module failing at import time.
    # BUG FIX: the original bare `except:` also swallowed non-import errors
    # (including KeyboardInterrupt); catch only ImportError.
    BokehServerWidgets = None
class TestBokehServerWidgets(ComparisonTestCase):
    """Tests for BokehServerWidgets.create_widget.

    Each test builds a holoviews Dimension (optionally an NdMapping) and
    checks the (widget, label, mapping) triple returned by create_widget.
    """

    def setUp(self):
        # Skip every test when the optional bokeh backend failed to import.
        if not BokehServerWidgets:
            raise SkipTest("Bokeh required to test BokehServerWidgets")

    def test_bokeh_server_dynamic_range_int(self):
        # Integer range -> Slider stepping by 1, editable TextInput label, no mapping.
        dim = Dimension('x', range=(3, 11))
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 3)
        self.assertEqual(widget.start, 3)
        self.assertEqual(widget.end, 11)
        self.assertEqual(widget.step, 1)
        self.assertIsInstance(label, TextInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3')
        self.assertIs(mapping, None)

    def test_bokeh_server_dynamic_range_float(self):
        # Float range without explicit step -> default step of 0.01.
        dim = Dimension('x', range=(3.1, 11.2))
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 3.1)
        self.assertEqual(widget.start, 3.1)
        self.assertEqual(widget.end, 11.2)
        self.assertEqual(widget.step, 0.01)
        self.assertIsInstance(label, TextInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3.1')
        self.assertIs(mapping, None)

    def test_bokeh_server_dynamic_range_float_step(self):
        # Explicit Dimension step overrides the default slider step.
        dim = Dimension('x', range=(3.1, 11.2), step=0.1)
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 3.1)
        self.assertEqual(widget.start, 3.1)
        self.assertEqual(widget.end, 11.2)
        self.assertEqual(widget.step, 0.1)
        self.assertIsInstance(label, TextInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3.1')
        self.assertIs(mapping, None)

    def test_bokeh_server_dynamic_range_not_editable(self):
        # editable=False -> read-only Div label instead of a TextInput.
        dim = Dimension('x', range=(3.1, 11.2))
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=False)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 3.1)
        self.assertEqual(widget.start, 3.1)
        self.assertEqual(widget.end, 11.2)
        self.assertEqual(widget.step, 0.01)
        self.assertIsInstance(label, Div)
        self.assertEqual(label.text, '<b>%s</b>' % dim.pprint_value_string(3.1))
        self.assertIs(mapping, None)

    def test_bokeh_server_dynamic_values_int(self):
        # Discrete int values -> slider over positional indices plus a
        # mapping from value to its pretty-printed form.
        values = list(range(3, 11))
        dim = Dimension('x', values=values)
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 0)
        self.assertEqual(widget.start, 0)
        self.assertEqual(widget.end, 7)
        self.assertEqual(widget.step, 1)
        self.assertIsInstance(label, AutocompleteInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3')
        self.assertEqual(mapping, [(v, dim.pprint_value(v)) for v in values])

    def test_bokeh_server_dynamic_values_float_not_editable(self):
        # Discrete float values, read-only label.
        values = list(np.linspace(3.1, 11.2, 7))
        dim = Dimension('x', values=values)
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=False)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 0)
        self.assertEqual(widget.start, 0)
        self.assertEqual(widget.end, 6)
        self.assertEqual(widget.step, 1)
        self.assertIsInstance(label, Div)
        self.assertEqual(label.text, '<b>%s</b>' % dim.pprint_value_string(3.1))
        self.assertEqual(mapping, [(v, dim.pprint_value(v)) for v in values])

    def test_bokeh_server_dynamic_values_float_editable(self):
        # Discrete float values, editable autocomplete label.
        values = list(np.linspace(3.1, 11.2, 7))
        dim = Dimension('x', values=values)
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 0)
        self.assertEqual(widget.start, 0)
        self.assertEqual(widget.end, 6)
        self.assertEqual(widget.step, 1)
        self.assertIsInstance(label, AutocompleteInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3.1')
        self.assertEqual(mapping, [(v, dim.pprint_value(v)) for v in values])

    def test_bokeh_server_dynamic_values_str_1(self):
        # String values -> a Select widget; no separate label or mapping.
        values = [chr(65+i) for i in range(10)]
        dim = Dimension('x', values=values)
        widget, label, mapping = BokehServerWidgets.create_widget(dim, editable=True)
        self.assertIsInstance(widget, Select)
        self.assertEqual(widget.value, 'A')
        self.assertEqual(widget.options, list(zip(values, values)))
        self.assertEqual(widget.title, dim.pprint_label)
        self.assertIs(mapping, None)
        self.assertIs(label, None)

    def test_bokeh_server_dynamic_values_str_2(self):
        # String keys supplied via an NdMapping -> Select plus key mapping.
        keys = [chr(65+i) for i in range(10)]
        ndmap = NdMapping({i: None for i in keys}, kdims=['x'])
        dim = Dimension('x')
        widget, label, mapping = BokehServerWidgets.create_widget(dim, ndmap, editable=True)
        self.assertIsInstance(widget, Select)
        self.assertEqual(widget.value, 'A')
        self.assertEqual(widget.options, list(zip(keys, keys)))
        self.assertEqual(widget.title, dim.pprint_label)
        self.assertEqual(mapping, list(zip(keys, keys)))

    def test_bokeh_server_static_numeric_values(self):
        # Numeric keys from an NdMapping -> index slider with value mapping.
        dim = Dimension('x')
        ndmap = NdMapping({i: None for i in range(3, 12)}, kdims=['x'])
        widget, label, mapping = BokehServerWidgets.create_widget(dim, ndmap, editable=True)
        self.assertIsInstance(widget, Slider)
        self.assertEqual(widget.value, 0)
        self.assertEqual(widget.start, 0)
        self.assertEqual(widget.end, 8)
        self.assertEqual(widget.step, 1)
        self.assertIsInstance(label, AutocompleteInput)
        self.assertEqual(label.title, dim.pprint_label)
        self.assertEqual(label.value, '3')
        self.assertEqual(mapping, [(k, dim.pprint_value(k)) for k in ndmap.keys()])
| 45.168919
| 92
| 0.678384
| 823
| 6,685
| 5.409478
| 0.115431
| 0.192049
| 0.179245
| 0.040431
| 0.843441
| 0.831536
| 0.817161
| 0.795373
| 0.761456
| 0.755391
| 0
| 0.020861
| 0.204039
| 6,685
| 147
| 93
| 45.47619
| 0.815824
| 0
| 0
| 0.687023
| 0
| 0
| 0.012715
| 0
| 0
| 0
| 0
| 0
| 0.618321
| 1
| 0.083969
| false
| 0
| 0.045802
| 0
| 0.137405
| 0.10687
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07dd5b63b3be753f014615c67bfff98ee9ea1bd3
| 8,633
|
py
|
Python
|
akshare/stock_feature/stock_em_report.py
|
repos-cl/akshare
|
94fa42fb095ac4bfa5d8d58673b805d36cc0128e
|
[
"MIT"
] | 5
|
2020-09-08T02:51:37.000Z
|
2022-03-23T07:43:45.000Z
|
akshare/stock_feature/stock_em_report.py
|
repos-cl/akshare
|
94fa42fb095ac4bfa5d8d58673b805d36cc0128e
|
[
"MIT"
] | 3
|
2021-01-26T09:31:43.000Z
|
2021-12-08T08:31:54.000Z
|
akshare/stock_feature/stock_em_report.py
|
repos-cl/akshare
|
94fa42fb095ac4bfa5d8d58673b805d36cc0128e
|
[
"MIT"
] | 13
|
2020-07-08T08:48:33.000Z
|
2022-03-23T08:37:11.000Z
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/4/5 16:53
Desc: 东方财富-数据中心-年报季报-业绩快报-三大报表
资产负债表
http://data.eastmoney.com/bbsj/202003/zcfz.html
利润表
http://data.eastmoney.com/bbsj/202003/lrb.html
现金流量表
http://data.eastmoney.com/bbsj/202003/xjll.html
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_zcfz(date: str = "20200331") -> pd.DataFrame:
    """
    Eastmoney data center - annual/quarterly reports - balance sheet.
    东方财富-数据中心-年报季报-业绩快报-资产负债表
    http://data.eastmoney.com/bbsj/202003/zcfz.html
    :param date: quarter end: "20200331", "20200630", "20200930", "20201231"; available from 20100331
    :type date: str
    :return: balance sheet (资产负债表)
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter-web.eastmoney.com/api/data/get"
    # YYYYMMDD -> YYYY-MM-DD as the API's REPORT_DATE filter expects.
    report_date = "-".join([date[:4], date[4:6], date[6:]])
    params = {
        "st": "NOTICE_DATE,SECURITY_CODE",
        "sr": "-1,-1",
        "ps": "5000",
        "p": "1",
        "type": "RPT_DMSK_FN_BALANCE",
        "sty": "ALL",
        "token": "894050c76af8597a853f5b408b759f5d",
        "filter": f"(REPORT_DATE='{report_date}')",
    }
    # First request only to learn how many pages there are.
    r = requests.get(url, params=params)
    data_json = r.json()
    page_num = data_json["result"]["pages"]
    temp_df_list = []
    for page in tqdm(range(1, page_num + 1)):
        # Reuse the same query, only the page number changes.
        params["p"] = page
        r = requests.get(url, params=params)
        data_json = r.json()
        temp_df_list.append(pd.DataFrame(data_json["result"]["data"]))
    # pd.concat once instead of DataFrame.append in the loop: append is
    # deprecated (removed in pandas 2.x) and quadratic.
    big_df = pd.concat(temp_df_list, ignore_index=True)
    big_df.reset_index(inplace=True)
    big_df["index"] = range(1, len(big_df) + 1)
    big_df.columns = [
        "序号",
        "_",
        "股票代码",
        "_",
        "_",
        "股票简称",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "公告日期",
        "_",
        "资产-总资产",
        "_",
        "资产-货币资金",
        "_",
        "资产-应收账款",
        "_",
        "资产-存货",
        "_",
        "负债-总负债",
        "负债-应付账款",
        "_",
        "负债-预收账款",
        "_",
        "股东权益合计",
        "_",
        "资产-总资产同比",
        "负债-总负债同比",
        "_",
        "资产负债率",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
    ]
    # Keep only the named columns, in display order.
    big_df = big_df[
        [
            "序号",
            "股票代码",
            "股票简称",
            "资产-货币资金",
            "资产-应收账款",
            "资产-存货",
            "资产-总资产",
            "资产-总资产同比",
            "负债-应付账款",
            "负债-预收账款",
            "负债-总负债",
            "负债-总负债同比",
            "资产负债率",
            "股东权益合计",
            "公告日期",
        ]
    ]
    return big_df
def stock_em_lrb(date: str = "20200331") -> pd.DataFrame:
    """
    Eastmoney data center - annual/quarterly reports - income statement.
    东方财富-数据中心-年报季报-业绩快报-利润表
    http://data.eastmoney.com/bbsj/202003/lrb.html
    :param date: quarter end: "20200331", "20200630", "20200930", "20201231"; available from 20100331
    :type date: str
    :return: income statement (利润表)
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter-web.eastmoney.com/api/data/get"
    # YYYYMMDD -> YYYY-MM-DD as the API's REPORT_DATE filter expects.
    report_date = "-".join([date[:4], date[4:6], date[6:]])
    params = {
        "st": "NOTICE_DATE,SECURITY_CODE",
        "sr": "-1,-1",
        "ps": "5000",
        "p": "1",
        "type": "RPT_DMSK_FN_INCOME",
        "sty": "ALL",
        "token": "894050c76af8597a853f5b408b759f5d",
        "filter": f"(REPORT_DATE='{report_date}')",
    }
    # First request only to learn how many pages there are.
    r = requests.get(url, params=params)
    data_json = r.json()
    page_num = data_json["result"]["pages"]
    temp_df_list = []
    for page in tqdm(range(1, page_num + 1)):
        # Reuse the same query, only the page number changes.
        params["p"] = page
        r = requests.get(url, params=params)
        data_json = r.json()
        temp_df_list.append(pd.DataFrame(data_json["result"]["data"]))
    # pd.concat once instead of DataFrame.append in the loop: append is
    # deprecated (removed in pandas 2.x) and quadratic.
    big_df = pd.concat(temp_df_list, ignore_index=True)
    big_df.reset_index(inplace=True)
    big_df["index"] = range(1, len(big_df) + 1)
    big_df.columns = [
        "序号",
        "_",
        "股票代码",
        "_",
        "_",
        "股票简称",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "公告日期",
        "_",
        "净利润",
        "营业总收入",
        "营业总支出-营业总支出",
        "_",
        "营业总支出-营业支出",
        "_",
        "_",
        "营业总支出-销售费用",
        "营业总支出-管理费用",
        "营业总支出-财务费用",
        "营业利润",
        "利润总额",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "营业总收入同比",
        "_",
        "净利润同比",
        "_",
        "_",
    ]
    # Keep only the named columns, in display order.
    big_df = big_df[
        [
            "序号",
            "股票代码",
            "股票简称",
            "净利润",
            "净利润同比",
            "营业总收入",
            "营业总收入同比",
            "营业总支出-营业支出",
            "营业总支出-销售费用",
            "营业总支出-管理费用",
            "营业总支出-财务费用",
            "营业总支出-营业总支出",
            "营业利润",
            "利润总额",
            "公告日期",
        ]
    ]
    return big_df
def stock_em_xjll(date: str = "20200331") -> pd.DataFrame:
    """
    Eastmoney data center - annual/quarterly reports - cash flow statement.
    东方财富-数据中心-年报季报-业绩快报-现金流量表
    http://data.eastmoney.com/bbsj/202003/xjll.html
    :param date: quarter end: "20200331", "20200630", "20200930", "20201231"; available from 20100331
    :type date: str
    :return: cash flow statement (现金流量表)
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter-web.eastmoney.com/api/data/get"
    # YYYYMMDD -> YYYY-MM-DD as the API's REPORT_DATE filter expects.
    report_date = "-".join([date[:4], date[4:6], date[6:]])
    params = {
        "st": "NOTICE_DATE,SECURITY_CODE",
        "sr": "-1,-1",
        "ps": "5000",
        "p": "1",
        "type": "RPT_DMSK_FN_CASHFLOW",
        "sty": "ALL",
        "token": "894050c76af8597a853f5b408b759f5d",
        "filter": f"(REPORT_DATE='{report_date}')",
    }
    # First request only to learn how many pages there are.
    r = requests.get(url, params=params)
    data_json = r.json()
    page_num = data_json["result"]["pages"]
    temp_df_list = []
    for page in tqdm(range(1, page_num + 1)):
        # Reuse the same query, only the page number changes.
        params["p"] = page
        r = requests.get(url, params=params)
        data_json = r.json()
        temp_df_list.append(pd.DataFrame(data_json["result"]["data"]))
    # pd.concat once instead of DataFrame.append in the loop: append is
    # deprecated (removed in pandas 2.x) and quadratic.
    big_df = pd.concat(temp_df_list, ignore_index=True)
    big_df.reset_index(inplace=True)
    big_df["index"] = range(1, len(big_df) + 1)
    big_df.columns = [
        "序号",
        "_",
        "股票代码",
        "_",
        "_",
        "股票简称",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "公告日期",
        "_",
        "经营性现金流-现金流量净额",
        "经营性现金流-净现金流占比",
        "_",
        "_",
        "_",
        "_",
        "投资性现金流-现金流量净额",
        "投资性现金流-净现金流占比",
        "_",
        "_",
        "_",
        "_",
        "融资性现金流-现金流量净额",
        "融资性现金流-净现金流占比",
        "净现金流-净现金流",
        "净现金流-同比增长",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
    ]
    # Keep only the named columns, in display order.
    big_df = big_df[
        [
            "序号",
            "股票代码",
            "股票简称",
            "净现金流-净现金流",
            "净现金流-同比增长",
            "经营性现金流-现金流量净额",
            "经营性现金流-净现金流占比",
            "投资性现金流-现金流量净额",
            "投资性现金流-净现金流占比",
            "融资性现金流-现金流量净额",
            "融资性现金流-净现金流占比",
            "公告日期",
        ]
    ]
    return big_df
if __name__ == "__main__":
    # Smoke test: fetch all three report tables for the same sample quarter
    # and print them (requires network access to the Eastmoney API).
    stock_em_zcfz_df = stock_em_zcfz(date="20200331")
    print(stock_em_zcfz_df)
    stock_em_lrb_df = stock_em_lrb(date="20200331")
    print(stock_em_lrb_df)
    stock_em_xjll_df = stock_em_xjll(date="20200331")
    print(stock_em_xjll_df)
| 22.960106
| 85
| 0.42407
| 823
| 8,633
| 4.149453
| 0.17497
| 0.043924
| 0.029868
| 0.035139
| 0.882284
| 0.8653
| 0.835139
| 0.802928
| 0.781845
| 0.674671
| 0
| 0.080923
| 0.387351
| 8,633
| 375
| 86
| 23.021333
| 0.564757
| 0.099849
| 0
| 0.904908
| 0
| 0
| 0.261591
| 0.071307
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009202
| false
| 0
| 0.009202
| 0
| 0.027607
| 0.009202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07f12e74f06b3b9f32b216b20a78a1d128ba11a1
| 176
|
py
|
Python
|
HR_pythonMutateString.py
|
bluewitch/Code-Blue-Python
|
07230fbc8e20d263950ad2476e79da12b64cff2d
|
[
"MIT"
] | null | null | null |
HR_pythonMutateString.py
|
bluewitch/Code-Blue-Python
|
07230fbc8e20d263950ad2476e79da12b64cff2d
|
[
"MIT"
] | null | null | null |
HR_pythonMutateString.py
|
bluewitch/Code-Blue-Python
|
07230fbc8e20d263950ad2476e79da12b64cff2d
|
[
"MIT"
] | 1
|
2020-02-13T14:47:12.000Z
|
2020-02-13T14:47:12.000Z
|
# pythonMutateString.py
def mutate_string(string, position, character):
    """Return a copy of *string* with the slot at *position* replaced by *character*.

    Strings are immutable, so a new string is assembled from the pieces
    around *position* (slicing semantics are preserved: position == len(string)
    appends, and negative positions follow Python slice rules).
    """
    before = string[:position]
    after = string[position + 1:]
    return before + character + after
| 35.2
| 64
| 0.721591
| 20
| 176
| 6.3
| 0.7
| 0.333333
| 0.365079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.159091
| 176
| 4
| 65
| 44
| 0.844595
| 0.306818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
5803cb55c211167d643c5ef5fe49e6410a9d2935
| 256
|
py
|
Python
|
herbieapp/services/__init__.py
|
merltron-pa/herbie-test
|
2dd601ed09045fd109fde4f56c8412fd0fd0df11
|
[
"MIT"
] | null | null | null |
herbieapp/services/__init__.py
|
merltron-pa/herbie-test
|
2dd601ed09045fd109fde4f56c8412fd0fd0df11
|
[
"MIT"
] | 3
|
2020-12-14T12:03:48.000Z
|
2021-06-30T10:57:00.000Z
|
herbieapp/services/__init__.py
|
merltron-pa/herbie-test
|
2dd601ed09045fd109fde4f56c8412fd0fd0df11
|
[
"MIT"
] | null | null | null |
# Re-export the service modules' public names as the package API.
# BUG FIX: `from .message_publisher import *` appeared twice (first and last
# line); the redundant duplicate has been removed.
from .message_publisher import *
from .business_entity_manager import *
from .schema_package import *
from .schema_registry import *
from .json_schema_validator import *
from .schema_importer import *
from .utils import *
| 25.6
| 38
| 0.808594
| 33
| 256
| 6
| 0.424242
| 0.353535
| 0.242424
| 0.262626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128906
| 256
| 9
| 39
| 28.444444
| 0.887892
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ed0b76aa961a60c8568db508fedf292e96c029c0
| 144
|
py
|
Python
|
temboo/core/Library/GitHub/GitDataAPI/Commits/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/GitHub/GitDataAPI/Commits/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/GitHub/GitDataAPI/Commits/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.GitHub.GitDataAPI.Commits.GetCommit import GetCommit, GetCommitInputSet, GetCommitResultSet, GetCommitChoreographyExecution
| 72
| 143
| 0.895833
| 12
| 144
| 10.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048611
| 144
| 1
| 144
| 144
| 0.941606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ed0ba015474ae27105e2a4e5be74d432e44e029f
| 5,460
|
py
|
Python
|
dfirtrack_config/tests/system/test_system_importer_file_csv_form_based_config_views.py
|
FabFaeb/dfirtrack
|
6dd1f5d16a688ea921753512fbf38ec8865e4c48
|
[
"MIT"
] | null | null | null |
dfirtrack_config/tests/system/test_system_importer_file_csv_form_based_config_views.py
|
FabFaeb/dfirtrack
|
6dd1f5d16a688ea921753512fbf38ec8865e4c48
|
[
"MIT"
] | null | null | null |
dfirtrack_config/tests/system/test_system_importer_file_csv_form_based_config_views.py
|
FabFaeb/dfirtrack
|
6dd1f5d16a688ea921753512fbf38ec8865e4c48
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
import urllib.parse
class SystemImporterFileCsvFormbasedConfigViewTestCase(TestCase):
    """ system importer file CSV form-based config view tests """

    @classmethod
    def setUpTestData(cls):
        # create user
        # Shared fixture user for all tests in this case.
        User.objects.create_user(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')

    def test_system_importer_file_csv_form_based_config_not_logged_in(self):
        """ anonymous GET is redirected to the login page with a next parameter """
        # create url
        destination = '/login/?next=' + urllib.parse.quote('/config/system/importer/file/csv/formbased/', safe='')
        # get response
        response = self.client.get('/config/system/importer/file/csv/formbased/', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=302, target_status_code=200)

    def test_system_importer_file_csv_form_based_config_logged_in(self):
        """ authenticated GET returns HTTP 200 """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # get response
        response = self.client.get('/config/system/importer/file/csv/formbased/')
        # compare
        self.assertEqual(response.status_code, 200)

    def test_system_importer_file_csv_form_based_config_template(self):
        """ view renders the config popup template """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # get response
        response = self.client.get('/config/system/importer/file/csv/formbased/')
        # compare
        self.assertTemplateUsed(response, 'dfirtrack_config/system/system_importer_file_csv_form_based_config_popup.html')

    def test_system_importer_file_csv_form_based_config_get_user_context(self):
        """ template context carries the logged-in user """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # get response
        response = self.client.get('/config/system/importer/file/csv/formbased/')
        # compare
        self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_form_based_config')

    def test_system_importer_file_csv_form_based_config_redirect(self):
        """ URL without trailing slash is permanently redirected (301) """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # create url
        destination = urllib.parse.quote('/config/system/importer/file/csv/formbased/', safe='/')
        # get response
        response = self.client.get('/config/system/importer/file/csv/formbased', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=301, target_status_code=200)

    def test_system_importer_file_csv_form_based_config_post_message(self):
        """ valid POST emits a success message """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # create post data
        # distinct column numbers form a valid configuration
        data_dict = {
            'csv_column_system': 1,
            'csv_column_ip': 2,
        }
        # get response
        response = self.client.post('/config/system/importer/file/csv/formbased/', data_dict)
        # get messages
        messages = list(get_messages(response.wsgi_request))
        # compare
        self.assertEqual(str(messages[-1]), 'System importer file CSV form based config changed')

    def test_system_importer_file_csv_form_based_config_post_redirect(self):
        """ valid POST returns HTTP 200 """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # create post data
        data_dict = {
            'csv_column_system': 1,
            'csv_column_ip': 2,
        }
        # get response
        response = self.client.post('/config/system/importer/file/csv/formbased/', data_dict)
        # compare
        self.assertEqual(response.status_code, 200)

    def test_system_importer_file_csv_form_based_config_post_invalid_reload(self):
        """ invalid POST (identical column numbers) reloads the form with HTTP 200 """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # create post data
        # same column for system and IP is rejected by form validation
        data_dict = {
            'csv_column_system': 1,
            'csv_column_ip': 1,
        }
        # get response
        response = self.client.post('/config/system/importer/file/csv/formbased/', data_dict)
        # compare
        self.assertEqual(response.status_code, 200)

    def test_system_importer_file_csv_form_based_config_post_invalid_template(self):
        """ invalid POST re-renders the config popup template """
        # login testuser
        self.client.login(username='testuser_system_importer_file_csv_form_based_config', password='SYYCdriMtm3lk6MPBYmD')
        # create post data
        # same column for system and IP is rejected by form validation
        data_dict = {
            'csv_column_system': 2,
            'csv_column_ip': 2,
        }
        # get response
        response = self.client.post('/config/system/importer/file/csv/formbased/', data_dict)
        # compare
        self.assertTemplateUsed(response, 'dfirtrack_config/system/system_importer_file_csv_form_based_config_popup.html')
| 42.65625
| 129
| 0.696703
| 629
| 5,460
| 5.720191
| 0.127186
| 0.132296
| 0.170095
| 0.198444
| 0.859922
| 0.859922
| 0.859922
| 0.839911
| 0.827682
| 0.791829
| 0
| 0.011062
| 0.205311
| 5,460
| 127
| 130
| 42.992126
| 0.818161
| 0.110806
| 0
| 0.483871
| 0
| 0
| 0.3159
| 0.238605
| 0
| 0
| 0
| 0
| 0.145161
| 1
| 0.16129
| false
| 0.145161
| 0.612903
| 0
| 0.790323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 10
|
ed3c3334cee3e9b629570bbd78f15f4f58fa2b4a
| 13,807
|
py
|
Python
|
src/rose-venn.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 8
|
2019-09-20T15:47:54.000Z
|
2021-11-01T02:05:17.000Z
|
src/rose-venn.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 9
|
2020-03-24T16:53:29.000Z
|
2022-01-13T01:07:07.000Z
|
src/rose-venn.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 3
|
2020-07-18T20:42:43.000Z
|
2021-04-14T11:31:42.000Z
|
#from matplotlib_venn import venn3, venn3_circles
#from matplotlib import pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
from matplotlib_venn import venn3, venn3_circles
import matplotlib.pyplot as plt
import os, errno
import pandas as pd
__author__ = 'proccaserra (Philippe Rocca-Serra)'
# author: philippe rocca-serra (philippe.rocca-serra@oerc.ox.ac.uk)
# ontology: http://www.stato-ontology.org
# Global font settings for the figure.
plt.rcParams.update({'font.family': 'Arial', 'font.size': 12, 'font.style':'normal'})
# Table S1
# VOC names below are normalized ("(E,E)-" -> "E_E_", spaces/commas -> "_")
# so the three tables can be compared as plain set elements.
# ["(E,E)_farnesal","(E,E)_farnesol","(E,E)_farnesyl acetate","(E)_2_hexen_1_ol","(E)_2_hexenal","(E)_beta_farnesene","(E)_beta_ocimene","(Z)_3_hexen_1_ol","(Z)_3_hexenyl acetate","1,3,5_trimethoxybenzene","2_phenylethanol","3,5_dimethoxytoluene","alpha_cadinol","benzaldehyde","benzylalcohol","beta_myrcene","bicyclogermacrene","citronellol","delta_cadinene","dihydro_beta_ionol","dihydro_beta_ionone","elemol","eugenol","geranial","geranic_acid","geraniol","geranyl acetate","germacrene D","germacrene D_4_ol","hexan_1_ol","hexanal","hexyl acetate","methyl eugenol","neral","nerol","nonanal","phenylacetaldehyde","tau_cadinol","tau_muurolol","Z_β_ocimene"]
TableS1_Science2015 = set(["E_E_farnesal","E_E_farnesol","E_E_farnesyl_acetate","E_2_hexen_1_ol","E_2_hexenal","E_beta_farnesene","E_beta_ocimene","Z_3_hexen_1_ol","Z_3_hexenyl_acetate","1_3_5_trimethoxybenzene","2_phenylethanol","3_5_dimethoxytoluene","alpha_cadinol","benzaldehyde","benzylalcohol","beta_myrcene","bicyclogermacrene","citronellol","delta_cadinene","dihydro_beta_ionol","dihydro_beta_ionone","elemol","eugenol","geranial","geranic_acid","geraniol","geranyl_acetate","germacrene_D","germacrene_D_4_ol","hexan_1_ol","hexanal","hexyl_acetate","methyl_eugenol","neral","nerol","nonanal","phenylacetaldehyde","tau_cadinol","tau_muurolol","Z_beta_ocimene"])
print("size set 1: ", len(TableS1_Science2015))
# Table S3:
# ["(E,E)_farnesol","(E)_beta_farnesene","alpha_cadinol","beta_myrcene","bicyclogermacrene","citronellal","citronellol","delta_cadinene","geranial","geraniol","geranyl acetate","germacrene D","germacrene D_4_ol","limonene","linalool","neral","nerol","beta_caryophyllene","beta_elemene","beta_pinene","tau_cadinol","tau_muurolol","alpha_humulene","alpha_muurolene","alpha_muurolol","alpha_pinene"])
TableS3_Science2015 = set(["E_E_farnesol","E_beta_farnesene","alpha_cadinol","beta_myrcene","bicyclogermacrene","citronellal","citronellol","delta_cadinene","geranial","geraniol","geranyl_acetate","germacrene_D","germacrene_D_4_ol","limonene","linalool","neral","nerol","beta_caryophyllene","beta_elemene","beta_pinene","tau_cadinol","tau_muurolol","alpha_humulene","alpha_muurolene","alpha_muurolol","alpha_pinene"])
print("size set 3: ", len(TableS3_Science2015))
# Table S4:
# set4_Science2015 = set(["(E)_beta_farnesene","(E)_beta_ocimene","allo_ocimene","beta_myrcene","citronellol","citronellyl_acetate","delta_cadinene","geraniol","germacrene D","limonene","neral","nerol","neryl_acetate","tau_cadinol","Z_beta_ocimene","alpha_phellandrene","alpha_terpinene","alpha_terpinolene","gamma_terpinene"])
TableS4_Science2015 = set(["E_beta_farnesene","E_beta_ocimene","allo_ocimene","beta_myrcene","citronellol","citronellyl_acetate","delta_cadinene","geraniol","germacrene_D","limonene","neral","nerol","neryl_acetate","tau_cadinol","Z_beta_ocimene","alpha_phellandrene","alpha_terpinene","alpha_terpinolene","gamma_terpinene"])
print("size set 4: ", len(TableS4_Science2015))
# set_NG2018=set(["hexan-2-ol","hexanal","(E)-2-hexenal","(Z)-3-hexen-1-ol","(E)-2-hexen-1-ol","hexan-1-ol","nonane",
#                "alpha-pinene","benzaldehyde","beta-myrcene","(Z)-3-hexenyl acetate","hexyl acetate","(E)-hexenyl acetate",
#                "(+/-)-limonene","benzylalcohol","phenylacetaldehyde","(E)-beta-ocimene","(+/-)-linalool",
#                "nonanal","2-phenylethanol","(+/-)-beta-citronellal","(+/-)-alpha-terpineol","decanal",
#                "nerol","(+/-)-beta-citronellol","neral","geraniol","beta-phenylethyl acetate","3,5-dimethoxytoluene",
#                "geranial","undecanal","theaspirane A","(+/-)-beta-citronellyl acetate","eugenol","neryl acetate",
#                "alpha-copaene","geranyl acetate","beta-elemene","methyl eugenol","(E)-beta-caryophyllene",
#                "1,3,5-trimethoxybenzene","dihydro-beta-ionone","alpha guaiene","dihydro-beta-ionol","(E)-beta-farnesene",
#                "germacrene D","pentadecane","(E,E)-alpha-farnesene","gamma-cadinene","delta-cadinene","elemol",
#                "Germacrene D 4 ol","hexadecane","Tau-cadinol","beta-eudesmol","alpha-cadinol","heptadecene","heptadecane",
#                "(E,E)-farnesol","(E,E)-farnesal","(E,E)-farnesyl acetate"])
# NOTE(review): a few entries below keep hyphens ("hexan-2-ol", "alpha-terpineol")
# unlike the underscore convention used elsewhere; they do not appear in the 2015
# tables so the intersections are unaffected, but confirm this is intentional.
set_NG2018=set(["hexan-2-ol","hexanal","E_2_hexenal","Z_3_hexen_1_ol","E_2_hexen_1_ol","hexan_1_ol","nonane","alpha_pinene","benzaldehyde","beta_myrcene","Z_3_hexenyl_acetate","hexyl_acetate","E_hexenyl_acetate","limonene","benzylalcohol","phenylacetaldehyde","E_beta_ocimene","linalool","nonanal","2_phenylethanol","beta_citronellal","alpha-terpineol","decanal","nerol","beta_citronellol","neral","geraniol","beta_phenylethyl_acetate","3_5_dimethoxytoluene","geranial","undecanal","theaspirane_A","beta_citronellyl_acetate","eugenol","neryl_acetate","alpha_copaene","geranyl_acetate","beta_elemene","methyl_eugenol","beta_caryophyllene","1_3_5_trimethoxybenzene","dihydro_beta_ionone","alpha_guaiene","dihydro_beta_ionol","E_beta_farnesene","germacrene_D","pentadecane","E_E_alpha_farnesene","gamma_cadinene","delta_cadinene","elemol","germacrene_D_4_ol","hexadecane","tau_cadinol","beta_eudesmol","alpha_cadinol","heptadecene","heptadecane","E_E_farnesol","E_E_farnesal","E_E_farnesyl_acetate"])
# Pairwise and three-way overlaps between the tables (plain set intersections).
intersect_1v3 = set(TableS1_Science2015) & set(TableS3_Science2015)
intersect_1v4 = set(TableS1_Science2015) & set(TableS4_Science2015)
intersect_3v4 = set(TableS3_Science2015) & set(TableS4_Science2015)
intersect_all = set(TableS1_Science2015) & set(TableS3_Science2015) & set(TableS4_Science2015)
intersect_1vNG = set(TableS1_Science2015) & set(set_NG2018)
intersect_3vNG = set(TableS3_Science2015) & set(set_NG2018)
# print(len(intersect_1v3), "|", sorted(intersect_1v3))
# print(len(intersect_1v4), "|", sorted(intersect_1v4))
# print(len(intersect_3v4), "|", sorted(intersect_3v4))
# print(len(intersect_all), "|", sorted(intersect_all))
# print(len(intersect_1vNG), "|", sorted(intersect_1vNG))
# print(len(intersect_3vNG), "|", sorted(intersect_3vNG))
#creates figure showing the overlap between the different VOCs testing in the 2015 article:
# v=venn3([TableS1_Science2015, TableS3_Science2015, set_NG2018], ('TableS1-10.1126/science.aab0696', 'TableS3-10.1126/science.aab0696', 'SupplementaryData3-NatureGenetics.10.1038/s41588-018-0110-3'))
# plt.show()
# Three-way Venn of the two 2015 tables against the 2018 supplementary data,
# with dotted black outlines drawn over the filled circles.
v1 = venn3([TableS1_Science2015, TableS3_Science2015, set_NG2018], ('Table S1, doi:10.1126/science.aab0696, (Science, 2015)', 'Table S3, doi:10.1126/science.aab0696, (Science, 2015)', 'Supplementary Data 3, doi:10.1038/s41588-018-0110-3, (Nature Genetics, 2018)'))
c = venn3_circles([TableS1_Science2015, TableS3_Science2015, set_NG2018], linestyle='dotted', linewidth=1, color="black")
# c[0].set_lw(4.0)
# c[0].set_ls('dotted')
# c[0].set_color('skyblue')
# c[0].set_alpha(0.9)
# c[1].set_lw(4.0)
# c[1].set_ls('dotted')
# c[1].set_color('darkblue')
# c[2].set_lw(4.0)
# c[2].set_ls('dotted')
# c[2].set_color('violet')
plt.title('Common and distinct metabolites between 2 rose studies by Bendahmane group in 2015 and 2018\n')
# Apply a uniform font to the three set labels.
for text in v1.set_labels:
    text.set_fontsize(11)
    text.set_family('arial')
    text.set_style('normal')
# plt.show()
# Make sure the output directory exists; tolerate a concurrent creation race.
try:
    if not os.path.exists('../figures/denovo'):
        os.makedirs('../figures/denovo')
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
plt.savefig('../figures/denovo/Figure_2a-venn-diagram-Science2015&NatGen2018.png', bbox_inches='tight')
#///////////////////////////////\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# input for generating figure 2 using upSetR:
# https://gehlenborglab.shinyapps.io/upsetr/
# https://www.biorxiv.org/content/early/2017/03/25/120600.full.pdf+html
# chemical names had to be altered to allow lists to be used as input to the software.
# set1_Science2015 = set([E_E_farnesal,E_E_farnesol,E_E_farnesyl_acetate,E_2_hexen_1_ol,E_2_hexenal,E_beta_farnesene,E_beta_ocimene,Z_3_hexen_1_ol,Z_3_hexenyl_acetate,1_3_5_trimethoxybenzene,2_phenylethanol,3_5_dimethoxytoluene,alpha_cadinol,benzaldehyde,benzylalcohol,beta_myrcene,bicyclogermacrene,citronellol,delta_cadinene,dihydro_beta_ionol,dihydro_beta_ionone,elemol,eugenol,geranial,geranic_acid,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,hexan_1_ol,hexanal,hexyl_acetate,methyl_eugenol,neral,nerol,nonanal,phenylacetaldehyde,tau_cadinol,tau_muurolol,Z_beta_ocimene])
# set2_Science2015 = set([E_E_farnesol,E_beta_farnesene,alpha_cadinol,beta_myrcene,bicyclogermacrene,citronellal,citronellol,delta_cadinene,geranial,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,limonene,linalool,neral,nerol,E_beta_caryophyllene,beta_elemene,beta_pinene,tau_cadinol,tau_muurolol,alpha_humulene,alpha_muurolene,alpha_muurolol,alpha_pinene])
# set_NG2018=set([hexan-2-ol,hexanal,E_2_hexenal,Z_3_hexen_1_ol,E_2_hexen_1_ol,hexan_1_ol,nonane,alpha_pinene,benzaldehyde,beta_myrcene,Z_3_hexenyl_acetate,hexyl_acetate,E_hexenyl_acetate,limonene,benzylalcohol,phenylacetaldehyde,E_beta_ocimene,linalool,nonanal,2_phenylethanol,beta_citronellal,alpha-terpineol,decanal,nerol,beta_citronellol,neral,geraniol,beta_phenylethyl_acetate,3_5_dimethoxytoluene,geranial,undecanal,theaspirane_A,beta_citronellyl_acetate,eugenol,neryl_acetate,alpha_copaene,geranyl_acetate,beta_elemene,methyl_eugenol,E_beta_caryophyllene,1_3_5_trimethoxybenzene,dihydro_beta_ionone,alpha_guaiene,dihydro_beta_ionol,E_beta_farnesene,germacrene_D,pentadecane,E_E_alpha_farnesene,gamma_cadinene,delta_cadinene,elemol,germacrene_D_4_ol,hexadecane,tau_cadinol,beta_eudesmol,alpha_cadinol,heptadecene,heptadecane,E_E_farnesol,E_E_farnesal,E_E_farnesyl_acetate])
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\////////////////////////////////////////
# set1_Science2015 = pd.Series([E_E_farnesal,E_E_farnesol,E_E_farnesyl_acetate,E_2_hexen_1_ol,E_2_hexenal,E_beta_farnesene,E_beta_ocimene,Z_3_hexen_1_ol,Z_3_hexenyl_acetate,1_3_5_trimethoxybenzene,2_phenylethanol,3_5_dimethoxytoluene,alpha_cadinol,benzaldehyde,benzylalcohol,beta_myrcene,bicyclogermacrene,citronellol,delta_cadinene,dihydro_beta_ionol,dihydro_beta_ionone,elemol,eugenol,geranial,geranic_acid,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,hexan_1_ol,hexanal,hexyl_acetate,methyl_eugenol,neral,nerol,nonanal,phenylacetaldehyde,tau_cadinol,tau_muurolol,Z_beta_ocimene])
# set2_Science2015 = pd.Series([E_E_farnesol,E_beta_farnesene,alpha_cadinol,beta_myrcene,bicyclogermacrene,citronellal,citronellol,delta_cadinene,geranial,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,limonene,linalool,neral,nerol,E_beta_caryophyllene,beta_elemene,beta_pinene,tau_cadinol,tau_muurolol,alpha_humulene,alpha_muurolene,alpha_muurolol,alpha_pinene])
# set_NG2018 = pd.Series([hexan-2-ol,hexanal,E_2_hexenal,Z_3_hexen_1_ol,E_2_hexen_1_ol,hexan_1_ol,nonane,alpha_pinene,benzaldehyde,beta_myrcene,Z_3_hexenyl_acetate,hexyl_acetate,E_hexenyl_acetate,limonene,benzylalcohol,phenylacetaldehyde,E_beta_ocimene,linalool,nonanal,2_phenylethanol,beta_citronellal,alpha-terpineol,decanal,nerol,beta_citronellol,neral,geraniol,beta_phenylethyl_acetate,3_5_dimethoxytoluene,geranial,undecanal,theaspirane_A,beta_citronellyl_acetate,eugenol,neryl_acetate,alpha_copaene,geranyl_acetate,beta_elemene,methyl_eugenol,E_beta_caryophyllene,1_3_5_trimethoxybenzene,dihydro_beta_ionone,alpha_guaiene,dihydro_beta_ionol,E_beta_farnesene,germacrene_D,pentadecane,E_E_alpha_farnesene,gamma_cadinene,delta_cadinene,elemol,germacrene_D_4_ol,hexadecane,tau_cadinol,beta_eudesmol,alpha_cadinol,heptadecene,heptadecane,E_E_farnesol,E_E_farnesal,E_E_farnesyl_acetate])
# set1_Science2015 = set([E_E_farnesal,E_E_farnesol,E_E_farnesyl_acetate,E_2_hexen_1_ol,E_2_hexenal,E_beta_farnesene,E_beta_ocimene,Z_3_hexen_1_ol,Z_3_hexenyl_acetate,1_3_5_trimethoxybenzene,2_phenylethanol,3_5_dimethoxytoluene,alpha_cadinol,benzaldehyde,benzylalcohol,beta_myrcene,bicyclogermacrene,citronellol,delta_cadinene,dihydro_beta_ionol,dihydro_beta_ionone,elemol,eugenol,geranial,geranic_acid,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,hexan_1_ol,hexanal,hexyl_acetate,methyl_eugenol,neral,nerol,nonanal,phenylacetaldehyde,tau_cadinol,tau_muurolol,Z_beta_ocimene])
# set2_Science2015 = set([E_E_farnesol,E_beta_farnesene,alpha_cadinol,beta_myrcene,bicyclogermacrene,citronellal,citronellol,delta_cadinene,geranial,geraniol,geranyl_acetate,germacrene_D,germacrene_D_4_ol,limonene,linalool,neral,nerol,E_beta_caryophyllene,beta_elemene,beta_pinene,tau_cadinol,tau_muurolol,alpha_humulene,alpha_muurolene,alpha_muurolol,alpha_pinene])
# set_NG2018 = set([hexan-2-ol,hexanal,E_2_hexenal,Z_3_hexen_1_ol,E_2_hexen_1_ol,hexan_1_ol,nonane,alpha_pinene,benzaldehyde,beta_myrcene,Z_3_hexenyl_acetate,hexyl_acetate,E_hexenyl_acetate,limonene,benzylalcohol,phenylacetaldehyde,E_beta_ocimene,linalool,nonanal,2_phenylethanol,beta_citronellal,alpha-terpineol,decanal,nerol,beta_citronellol,neral,geraniol,beta_phenylethyl_acetate,3_5_dimethoxytoluene,geranial,undecanal,theaspirane_A,beta_citronellyl_acetate,eugenol,neryl_acetate,alpha_copaene,geranyl_acetate,beta_elemene,methyl_eugenol,E_beta_caryophyllene,1_3_5_trimethoxybenzene,dihydro_beta_ionone,alpha_guaiene,dihydro_beta_ionol,E_beta_farnesene,germacrene_D,pentadecane,E_E_alpha_farnesene,gamma_cadinene,delta_cadinene,elemol,germacrene_D_4_ol,hexadecane,tau_cadinol,beta_eudesmol,alpha_cadinol,heptadecene,heptadecane,E_E_farnesol,E_E_farnesal,E_E_farnesyl_acetate])
| 110.456
| 997
| 0.805968
| 1,929
| 13,807
| 5.388802
| 0.130638
| 0.007696
| 0.015392
| 0.015873
| 0.83367
| 0.825878
| 0.81164
| 0.777778
| 0.777778
| 0.777778
| 0
| 0.040365
| 0.040052
| 13,807
| 124
| 998
| 111.346774
| 0.743926
| 0.689723
| 0
| 0
| 0
| 0.028571
| 0.565475
| 0.058489
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed7570bf0e6c6922e146b90a601f7926638cdcad
| 98,645
|
py
|
Python
|
model/stack_module.py
|
wgcban/spin_roadmapper
|
2c1c8f22073d989753dc6f95d1f547198a76414b
|
[
"Apache-2.0"
] | 24
|
2021-09-15T00:20:52.000Z
|
2022-03-27T05:01:23.000Z
|
model/stack_module.py
|
wgcban/spin_roadmapper
|
2c1c8f22073d989753dc6f95d1f547198a76414b
|
[
"Apache-2.0"
] | 2
|
2021-12-27T13:45:02.000Z
|
2022-03-25T13:33:20.000Z
|
model/stack_module.py
|
wgcban/spin_roadmapper
|
2c1c8f22073d989753dc6f95d1f547198a76414b
|
[
"Apache-2.0"
] | 3
|
2021-12-27T03:11:56.000Z
|
2022-03-10T10:24:42.000Z
|
from __future__ import print_function
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.glore import GloRe_Unit_2D, GloRe_Unit_SE_2D
from model.SPIN import spin
import cv2
from model.inception_glore import Inception_GloRe_Unit_2D, Inception_GloRe_Unit_2D_v2
affine_par = True
class BasicResnetBlock(nn.Module):
    """Two-layer 3x3 convolutional residual block (ResNet "basic" variant).

    Computes ``relu(bn2(conv2(relu(bn1(conv1(x))))) + identity)``, where the
    identity path goes through ``downsample`` when one is supplied.
    """

    expansion = 1  # channel multiplier; callers use it to size following layers

    def __init__(self, inplanes, planes, stride=1, padding=1, downsample=None):
        super(BasicResnetBlock, self).__init__()
        # NOTE(review): `stride` is applied to BOTH convs here; with stride > 1
        # the residual add would need a matching `downsample`. All in-file
        # callers use the default stride=1 — confirm before passing stride != 1.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=padding, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=padding, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) skip path.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        return self.relu(out)
class DecoderBlock(nn.Module):
    """1x1 reduce -> 3x3 transposed-conv 2x upsample -> 1x1 expand.

    Maps (B, C, H, W) to (B, n_filters, 2H, 2W); every stage is followed by
    BatchNorm + ReLU.  Conv weights receive He-style normal initialisation,
    BatchNorm weights/biases are set to 1/0.
    """

    def __init__(self, in_channels, n_filters, group=1):
        super(DecoderBlock, self).__init__()
        mid = in_channels // 4

        # B, C, H, W -> B, C/4, H, W
        self.conv1 = nn.Conv2d(in_channels, mid, 1, groups=group)
        self.norm1 = nn.BatchNorm2d(mid)
        self.relu1 = nn.ReLU(inplace=True)

        # B, C/4, H, W -> B, C/4, 2H, 2W (stride-2 transposed conv)
        self.deconv2 = nn.ConvTranspose2d(mid, mid, 3, stride=2, padding=1,
                                          output_padding=1, groups=group)
        self.norm2 = nn.BatchNorm2d(mid)
        self.relu2 = nn.ReLU(inplace=True)

        # B, C/4, 2H, 2W -> B, n_filters, 2H, 2W
        self.conv3 = nn.Conv2d(mid, n_filters, 1, groups=group)
        self.norm3 = nn.BatchNorm2d(n_filters)
        self.relu3 = nn.ReLU(inplace=True)

        # He-style init for conv layers; identity init for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan))
            elif isinstance(m, nn.ConvTranspose2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        out = self.relu1(self.norm1(self.conv1(x)))
        out = self.relu2(self.norm2(self.deconv2(out)))
        out = self.relu3(self.norm3(self.conv3(out)))
        return out
class HourglassModuleMTL(nn.Module):
    """Recursive hourglass module that emits two output branches per pass.

    Level 0 of ``self.hg`` carries six residual stacks (the usual four plus
    two bottom-of-the-hourglass stacks at indices 4/5); deeper levels carry
    four.  Each forward pass pools down, recurses, then upsamples and adds
    the skip connection, yielding one feature map per task.
    """

    def __init__(self, block, num_blocks, planes, depth):
        super(HourglassModuleMTL, self).__init__()
        self.depth = depth
        self.block = block
        self.upsample = nn.Upsample(scale_factor=2)
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)

    def _make_residual1(self, block, num_blocks, planes):
        """A chain of ``num_blocks`` residual blocks at constant width."""
        chain = [block(planes * block.expansion, planes) for _ in range(num_blocks)]
        return nn.Sequential(*chain)

    def _make_hour_glass(self, block, num_blocks, planes, depth):
        """Build one ModuleList of residual stacks per recursion level."""
        levels = []
        for level in range(depth):
            stacks = [self._make_residual1(block, num_blocks, planes) for _ in range(4)]
            if level == 0:
                # Two extra stacks used only at the innermost recursion level.
                stacks.append(self._make_residual1(block, num_blocks, planes))
                stacks.append(self._make_residual1(block, num_blocks, planes))
            levels.append(nn.ModuleList(stacks))
        return nn.ModuleList(levels)

    def _hour_glass_forward(self, n, x):
        """Recursive pass for level ``n``; returns the two task branches."""
        rows, cols = x.size(2), x.size(3)
        level = self.hg[n - 1]
        skip = level[0](x)
        down = F.max_pool2d(x, 2, stride=2, ceil_mode=True)
        down = level[1](down)
        if n > 1:
            mid_1, mid_2 = self._hour_glass_forward(n - 1, down)
        else:
            mid_1, mid_2 = level[4](down), level[5](down)
        up_1 = self.upsample(level[2](mid_1))
        up_2 = self.upsample(level[3](mid_2))
        # Crop back to the pre-pooling size (ceil-mode pooling may have
        # padded odd-sized inputs, so the upsampled map can be one too large).
        return skip + up_1[:, :, :rows, :cols], skip + up_2[:, :, :rows, :cols]

    def forward(self, x):
        return self._hour_glass_forward(self.depth, x)
####### Orgnal StackHourglass Implementation #######
class StackHourglassNetMTL(nn.Module):
    """Multi-task stacked-hourglass network (baseline, no GloRe units).

    Task 1 predicts ``task1_classes`` score maps and task 2 predicts
    ``task2_classes`` score maps (presumably road segmentation and
    orientation bins — TODO confirm against the training code).
    ``forward`` returns two lists: one 1/4-resolution score map per
    hourglass stack (intermediate supervision), then a 1/2-resolution
    decoder score and a full-resolution final score for each task.
    """

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL, self).__init__()
        #num_stacks = 1
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        # NOTE: _make_residual advances self.inplanes; after layer3 it equals
        # num_feats * block.expansion (128), which later sizes the decoders.
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        # Per-stack heads: res_* refine the hourglass output, fc_* project it,
        # score_* emit class maps; _fc_*/_score_* map features/predictions
        # back into the trunk for every stack except the last.
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final Classifier (task 1): decoder to 1/2 resolution, then a
        # transposed conv back to full resolution.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final Classifier (task 2): mirror of the task-1 head.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; a 1x1 conv aligns the skip path
        when stride or channel count changes.  Advances ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BatchNorm -> shared ReLU.  The BN layer is sized with
        ``inplanes``; callers always pass inplanes == outplanes, so the
        conv-then-bn order is safe."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Run stem, all hourglass stacks, and both final heads.

        Returns ``(out_1, out_2)``: per-stack 1/4-resolution scores followed
        by the 1/2-resolution decoder score and full-resolution final score.
        """
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Crop to ceil(rows/4) x ceil(cols/4): undoes ceil-mode padding.
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Re-inject features and predictions into the trunk for the
                # next stack (standard stacked-hourglass feedback).
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification (task 1)
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final Classification (task 2)
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################ StackHourglassNetMTL_glorev1 #############
class StackHourglassNetMTLglore1(nn.Module):
    """Stacked-hourglass MTL network with GloRe graph-reasoning units.

    Identical to ``StackHourglassNetMTL`` except that a ``GloRe_Unit_2D``
    (32 channels in/out) is inserted into each full-resolution head, right
    after the first ReLU of the final classifier.
    """

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTLglore1, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        # _make_residual advances self.inplanes to num_feats (128) here.
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        # Per-stack heads; _fc_*/_score_* feed back into the trunk for all
        # but the last stack.
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        #Glore module
        # Applied on the 32-channel features after finalrelu1 in each head.
        self.glore_seg = GloRe_Unit_2D(num_in=32, num_mid=16)
        self.glore_ang = GloRe_Unit_2D(num_in=32, num_mid=16)
        # Final Classifier (task 1)
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final Classifier (task 2)
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; 1x1-conv skip projection when
        stride/channels change.  Advances ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BatchNorm -> shared ReLU (inplanes == outplanes for
        all in-file callers)."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Same flow as ``StackHourglassNetMTL.forward``, plus a GloRe unit
        in each full-resolution head.  Returns ``(out_1, out_2)``."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Intermediate 1/4-resolution supervision, cropped to undo
            # ceil-mode padding.
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed features and predictions back into the trunk.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification (task 1)
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        #GLore Units
        f2 = self.glore_seg(f2)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final Classification (task 2)
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        #GLore Units
        a_f2 = self.glore_ang(a_f2)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################ StackHourglassNetMTL_glorev1 #############
class StackHourglassNetMTL_inception_glore(nn.Module):
    """Stacked-hourglass MTL network with Inception-GloRe units.

    Identical to ``StackHourglassNetMTL`` except that an
    ``Inception_GloRe_Unit_2D`` is applied to each 1/2-resolution decoder
    output (128 channels — ``self.inplanes`` has advanced to 128 by the time
    the decoders are built), additionally conditioned on the stem feature
    map ``x_proj`` (presumably as a projection/guidance input — confirm
    against Inception_GloRe_Unit_2D's definition).
    """

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_inception_glore, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        # _make_residual advances self.inplanes to num_feats (128) here.
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        # Per-stack heads; _fc_*/_score_* feed back into the trunk for all
        # but the last stack.
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        #Glore module
        # num_in=128 matches the decoder output channels (self.inplanes is
        # 128 by this point in __init__).
        self.glore_seg = Inception_GloRe_Unit_2D(num_in=128, num_mid=16)
        self.glore_ang = Inception_GloRe_Unit_2D(num_in=128, num_mid=16)
        # Final Classifier (task 1)
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final Classifier (task 2)
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; 1x1-conv skip projection when
        stride/channels change.  Advances ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BatchNorm -> shared ReLU (inplanes == outplanes for
        all in-file callers)."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Same flow as the baseline, with Inception-GloRe applied to both
        decoder outputs (conditioned on the stem features ``x_proj``)."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # Keep the 64-channel, 1/2-resolution stem features for the GloRe units.
        x_proj = x
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Intermediate 1/4-resolution supervision (cropped).
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed features and predictions back into the trunk.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        #GLore Units
        # Earlier experiment (applied before the decoders) kept for reference:
        #y1 = self.glore_seg(y1, x_proj)
        #y2 = self.glore_ang(y2, x_proj)
        # Final Classification (task 1)
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        d1 = self.glore_seg(d1, x_proj) #GLore Units
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final Classification (task 2)
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_d1 = self.glore_ang(a_d1, x_proj) #GLore Units
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################ StackHourglassNetMTL_glorev1 #############
class StackHourglassNetMTL_inception_glore_seg(nn.Module):
def __init__(
self,
task1_classes=2,
task2_classes=37,
block=BasicResnetBlock,
in_channels=3,
num_stacks=2,
num_blocks=1,
hg_num_blocks=3,
depth=3,
):
super(StackHourglassNetMTL_inception_glore_seg, self).__init__()
self.inplanes = 64
self.num_feats = 128
self.num_stacks = num_stacks
self.conv1 = nn.Conv2d(
in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_residual(block, self.inplanes, 1)
self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
# build hourglass modules
ch = self.num_feats * block.expansion
hg = []
res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
for i in range(num_stacks):
hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
fc_1.append(self._make_fc(ch, ch))
fc_2.append(self._make_fc(ch, ch))
score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
if i < num_stacks - 1:
_fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
_fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
_score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
_score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
self.hg = nn.ModuleList(hg)
self.res_1 = nn.ModuleList(res_1)
self.fc_1 = nn.ModuleList(fc_1)
self.score_1 = nn.ModuleList(score_1)
self._fc_1 = nn.ModuleList(_fc_1)
self._score_1 = nn.ModuleList(_score_1)
self.res_2 = nn.ModuleList(res_2)
self.fc_2 = nn.ModuleList(fc_2)
self.score_2 = nn.ModuleList(score_2)
self._fc_2 = nn.ModuleList(_fc_2)
self._score_2 = nn.ModuleList(_score_2)
#Glore module
self.glore_seg = Inception_GloRe_Unit_2D(num_in=128, num_mid=16)
#self.glore_ang = Inception_GloRe_Unit_2D(num_in=128, num_mid=16)
# Final Classifier
self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
self.decoder1_score = nn.Conv2d(
self.inplanes, task1_classes, kernel_size=1, bias=True
)
self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
self.finalrelu1 = nn.ReLU(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nn.ReLU(inplace=True)
self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
# Final Classifier
self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
self.angle_decoder1_score = nn.Conv2d(
self.inplanes, task2_classes, kernel_size=1, bias=True
)
self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
self.angle_finalrelu1 = nn.ReLU(inplace=True)
self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
self.angle_finalrelu2 = nn.ReLU(inplace=True)
self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
def _make_residual(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=True,
)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _make_fc(self, inplanes, outplanes):
bn = nn.BatchNorm2d(inplanes)
conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
return nn.Sequential(conv, bn, self.relu)
def forward(self, x):
    """Stacked-hourglass multi-task forward pass.

    Returns (out_1, out_2): lists of segmentation (task1) and orientation
    (task2) score maps — one per hourglass stack at 1/4 input scale, then
    a 1/2-scale decoder score, then a full-resolution prediction.
    Assumes x is a (batch, in_channels, rows, cols) image tensor — TODO confirm.
    """
    out_1 = []  # segmentation-branch predictions, coarsest first
    out_2 = []  # orientation-branch predictions, coarsest first
    rows = x.size(2)
    cols = x.size(3)
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    # Keep the 1/2-scale stem features; the GloRe unit below reuses them.
    x_proj = x
    x = self.layer1(x)
    x = self.maxpool(x)
    x = self.layer2(x)
    x = self.layer3(x)
    for i in range(self.num_stacks):
        y1, y2 = self.hg[i](x)
        y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
        y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
        score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
        # Crop to exactly 1/4 of the input size (ceil-mode pooling can overshoot).
        out_1.append(
            score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
        )
        out_2.append(
            score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
        )
        if i < self.num_stacks - 1:
            # Intermediate supervision: project features and score maps back
            # to the feature width and add them into the next stack's input.
            _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
            _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
            x = x + _fc_1 + _score_1 + _fc_2 + _score_2
    # GloRe units (the angle-branch unit is disabled):
    # y1 = self.glore_seg(y1, x_proj)
    # y2 = self.glore_ang(y2, x_proj)
    # Final classification — segmentation branch.
    d1 = self.decoder1(y1)[
        :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
    ]
    d1_score = self.decoder1_score(d1)
    out_1.append(d1_score)
    d1 = self.glore_seg(d1, x_proj)  # GloRe unit conditioned on stem features
    f1 = self.finaldeconv1(d1)
    f2 = self.finalrelu1(f1)
    f3 = self.finalconv2(f2)
    f4 = self.finalrelu2(f3)
    f5 = self.finalconv3(f4)
    out_1.append(f5)
    # Final classification — orientation branch.
    a_d1 = self.angle_decoder1(y2)[
        :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
    ]
    a_d1_score = self.angle_decoder1_score(a_d1)
    out_2.append(a_d1_score)
    # a_d1 = self.glore_ang(a_d1, x_proj)  # GloRe unit (disabled)
    a_f1 = self.angle_finaldeconv1(a_d1)
    a_f2 = self.angle_finalrelu1(a_f1)
    a_f3 = self.angle_finalconv2(a_f2)
    a_f4 = self.angle_finalrelu2(a_f3)
    a_f5 = self.angle_finalconv3(a_f4)
    out_2.append(a_f5)
    return out_1, out_2
################ StackHourglassNetMTL_inception_glore_seg_v2 #############
# Concatenate and combine using 1x1 convs.
class StackHourglassNetMTL_inception_glore_seg_v2(nn.Module):
    """Stacked-hourglass multi-task net (segmentation + orientation) with
    Inception GloRe v2 graph-reasoning units on both branches: at the
    1/4-scale hourglass output and at the 1/2-scale decoder output.
    """

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_inception_glore_seg_v2, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Build hourglass modules plus per-stack heads for both tasks.
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                # Intermediate supervision: features and scores are projected
                # back to `ch` channels and summed into the next stack's input.
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # GloRe (Inception v2) graph-reasoning units, both branches/scales.
        self.glore_seg64 = Inception_GloRe_Unit_2D_v2(num_in=128, num_mid=16)
        self.glore_ang64 = Inception_GloRe_Unit_2D_v2(num_in=128, num_mid=16)
        self.glore_seg128 = Inception_GloRe_Unit_2D_v2(num_in=128, num_mid=16)
        self.glore_ang128 = Inception_GloRe_Unit_2D_v2(num_in=128, num_mid=16)
        # self.glore_seg256 = Inception_GloRe_Unit_2D_v2(num_in=32, num_mid=16)
        # self.glore_ang256 = Inception_GloRe_Unit_2D_v2(num_in=32, num_mid=16)
        # Final classifier — segmentation branch.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier — orientation branch.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the skip path if the
        stride or channel count changes.  Mutates ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> shared ReLU.  BN follows the conv, so it is
        sized by ``outplanes`` (the original used ``inplanes``, which only
        worked because all call sites pass inplanes == outplanes)."""
        bn = nn.BatchNorm2d(outplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Return (seg_outputs, angle_outputs): per-stack 1/4-scale scores,
        a 1/2-scale decoder score, and a full-resolution prediction each."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        # Crop bounds at 1/4 and 1/2 input scale (hoisted out of the loop).
        r4, c4 = int(math.ceil(rows / 4.0)), int(math.ceil(cols / 4.0))
        r2, c2 = int(math.ceil(rows / 2.0)), int(math.ceil(cols / 2.0))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            out_1.append(score1[:, :, :r4, :c4])
            out_2.append(score2[:, :, :r4, :c4])
            if i < self.num_stacks - 1:
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Graph reasoning on the 1/4-scale hourglass outputs.
        y1 = self.glore_seg64(y1)
        y2 = self.glore_ang64(y2)
        # Final classification — segmentation branch.
        d1 = self.decoder1(y1)[:, :, :r2, :c2]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        # Graph reasoning on the 1/2-scale decoder output.
        d1 = self.glore_seg128(d1)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        # f2 = self.glore_seg256(f2)  # full-resolution GloRe (disabled)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final classification — orientation branch.
        a_d1 = self.angle_decoder1(y2)[:, :, :r2, :c2]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_d1 = self.glore_ang128(a_d1)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        # a_f2 = self.glore_ang256(a_f2, print_features=True)  # disabled
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################ StackHourglassNetMTLgloreSE ############### (reached 44.7% after the 60th epoch)
# This model combines the original stacked-hourglass module with a Global Reasoning (GloRe) unit.
# The input branch of the GloRe unit is fed through a Squeeze-and-Excitation block,
# so features are recalibrated before the dimensionality reduction, highlighting the most important ones.
class StackHourglassNetMTLgloreSE(nn.Module):
    """Stacked-hourglass multi-task net with GloRe + Squeeze-and-Excitation
    units applied to both branch outputs at 1/4 input scale."""

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTLgloreSE, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Build hourglass modules plus per-stack heads for both tasks.
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                # Intermediate supervision feedback into the next stack.
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # GloRe + Squeeze-and-Excitation modules, one per branch.
        self.glorese_seg = GloRe_Unit_SE_2D(num_in=128, num_mid=64)
        self.glorese_ang = GloRe_Unit_SE_2D(num_in=128, num_mid=64)
        # Final classifier — segmentation branch.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier — orientation branch.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the skip path if the
        stride or channel count changes.  Mutates ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> shared ReLU.  BN follows the conv, so it is
        sized by ``outplanes`` (the original used ``inplanes``, which only
        worked because all call sites pass inplanes == outplanes)."""
        bn = nn.BatchNorm2d(outplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Return (seg_outputs, angle_outputs): per-stack 1/4-scale scores,
        a 1/2-scale decoder score, and a full-resolution prediction each."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        # Crop bounds at 1/4 and 1/2 input scale (hoisted out of the loop).
        r4, c4 = int(math.ceil(rows / 4.0)), int(math.ceil(cols / 4.0))
        r2, c2 = int(math.ceil(rows / 2.0)), int(math.ceil(cols / 2.0))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            out_1.append(score1[:, :, :r4, :c4])
            out_2.append(score2[:, :, :r4, :c4])
            if i < self.num_stacks - 1:
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # GloRe+SE graph reasoning on both branch outputs.
        y1 = self.glorese_seg(y1)
        y2 = self.glorese_ang(y2)
        # Final classification — segmentation branch.
        d1 = self.decoder1(y1)[:, :, :r2, :c2]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final classification — orientation branch.
        a_d1 = self.angle_decoder1(y2)[:, :, :r2, :c2]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHourglassNet with Dual GCN ################
################################################################
# We added a Dual GCN module after the multi-stack block.
class StackHourglassNetMTL_DGCN(nn.Module):
    """Stacked-hourglass multi-task net with a Dual-GCN (SPIN) module
    applied to both branch outputs after the multi-stack block."""

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCN, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Build hourglass modules plus per-stack heads for both tasks.
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                # Intermediate supervision feedback into the next stack.
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Dual graph convolutional (SPIN) modules, one per branch.
        self.dgcn_seg = spin(planes=128, ratio=1)  # GloRe_Unit_2D(num_in=128, num_mid=64)
        self.dgcn_ang = spin(planes=128, ratio=1)  # GloRe_Unit_2D(num_in=128, num_mid=64)
        # Final classifier — segmentation branch.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier — orientation branch.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the skip path if the
        stride or channel count changes.  Mutates ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> shared ReLU.  BN follows the conv, so it is
        sized by ``outplanes`` (the original used ``inplanes``, which only
        worked because all call sites pass inplanes == outplanes)."""
        bn = nn.BatchNorm2d(outplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Return (seg_outputs, angle_outputs): per-stack 1/4-scale scores,
        a 1/2-scale decoder score, and a full-resolution prediction each."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        # Crop bounds at 1/4 and 1/2 input scale (hoisted out of the loop).
        r4, c4 = int(math.ceil(rows / 4.0)), int(math.ceil(cols / 4.0))
        r2, c2 = int(math.ceil(rows / 2.0)), int(math.ceil(cols / 2.0))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            out_1.append(score1[:, :, :r4, :c4])
            out_2.append(score2[:, :, :r4, :c4])
            if i < self.num_stacks - 1:
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Dual-GCN graph reasoning on both branch outputs.
        y1 = self.dgcn_seg(y1)
        y2 = self.dgcn_ang(y2)
        # Final classification — segmentation branch.
        d1 = self.decoder1(y1)[:, :, :r2, :c2]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final classification — orientation branch.
        a_d1 = self.angle_decoder1(y2)[:, :, :r2, :c2]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHourglassNet with Dual GCN ################
################################################################
# We added a Dual GCN module after the first upsampling step, and only on the segmentation branch.
class StackHourglassNetMTL_DGCNv2(nn.Module):
    """Stacked-hourglass multi-task net with a Dual-GCN (SPIN) module on
    the segmentation branch only, applied at full resolution after the
    first transposed-conv upsampling."""

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCNv2, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Build hourglass modules plus per-stack heads for both tasks.
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                # Intermediate supervision feedback into the next stack.
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier — segmentation branch.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier — orientation branch.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # Dual graph convolutional (SPIN) module, segmentation branch only.
        # Declared last to preserve the original parameter/state-dict order.
        self.dgcn_seg = spin(planes=32, ratio=2)  # GloRe_Unit_2D(num_in=128, num_mid=64)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the skip path if the
        stride or channel count changes.  Mutates ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> shared ReLU.  BN follows the conv, so it is
        sized by ``outplanes`` (the original used ``inplanes``, which only
        worked because all call sites pass inplanes == outplanes)."""
        bn = nn.BatchNorm2d(outplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Return (seg_outputs, angle_outputs): per-stack 1/4-scale scores,
        a 1/2-scale decoder score, and a full-resolution prediction each."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        # Crop bounds at 1/4 and 1/2 input scale (hoisted out of the loop).
        r4, c4 = int(math.ceil(rows / 4.0)), int(math.ceil(cols / 4.0))
        r2, c2 = int(math.ceil(rows / 2.0)), int(math.ceil(cols / 2.0))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            out_1.append(score1[:, :, :r4, :c4])
            out_2.append(score2[:, :, :r4, :c4])
            if i < self.num_stacks - 1:
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final classification — segmentation branch.
        d1 = self.decoder1(y1)[:, :, :r2, :c2]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        # Dual graph reasoning at full resolution.
        f2 = self.dgcn_seg(f2)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final classification — orientation branch.
        a_d1 = self.angle_decoder1(y2)[:, :, :r2, :c2]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHourglassNet with SPIN ################
################################################################
# We added a Dual GCN module after the first upsampling step, and only on the segmentation branch.
# Dual GCN is placed at multiple locations (256x256 scale and 128x128 scale).
class StackHourglassNetMTL_DGCNv4(nn.Module):
    """Stacked-hourglass multi-task net with SPIN (Dual-GCN) graph
    reasoning on the segmentation branch at the full-resolution head."""

    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCNv4, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv halves the input resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Build hourglass modules plus per-stack heads for both tasks.
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                # Intermediate supervision feedback into the next stack.
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier — segmentation branch.
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier — orientation branch.
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # SPIN modules at the full-resolution segmentation head.
        # NOTE(review): dgcn_seg_l42 is constructed (and trained) but its
        # call in forward() is commented out — confirm this is intentional.
        self.dgcn_seg_l41 = spin(planes=32, ratio=1)
        self.dgcn_seg_l42 = spin(planes=32, ratio=1)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the skip path if the
        stride or channel count changes.  Mutates ``self.inplanes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> shared ReLU.  BN follows the conv, so it is
        sized by ``outplanes`` (the original used ``inplanes``, which only
        worked because all call sites pass inplanes == outplanes)."""
        bn = nn.BatchNorm2d(outplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Return (seg_outputs, angle_outputs): per-stack 1/4-scale scores,
        a 1/2-scale decoder score, and a full-resolution prediction each."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        # Crop bounds at 1/4 and 1/2 input scale (hoisted out of the loop).
        r4, c4 = int(math.ceil(rows / 4.0)), int(math.ceil(cols / 4.0))
        r2, c2 = int(math.ceil(rows / 2.0)), int(math.ceil(cols / 2.0))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            out_1.append(score1[:, :, :r4, :c4])
            out_2.append(score2[:, :, :r4, :c4])
            if i < self.num_stacks - 1:
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final classification — segmentation branch.
        d1 = self.decoder1(y1)[:, :, :r2, :c2]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        # Graph reasoning at the full-resolution head — SPIN layer 1.
        f2 = self.dgcn_seg_l41(f2)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        # f4 = self.dgcn_seg_l42(f4)  # SPIN layer 2 (disabled)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final classification — orientation branch.
        a_d1 = self.angle_decoder1(y2)[:, :, :r2, :c2]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHourglassNet with Hybrid GCN ##############
################################################################
# We added a Dual GCN module after the first upsampling step, and only on the segmentation branch.
class StackHourglassNetMTL_DGCNv3(nn.Module):
    """Multi-task stacked-hourglass network, dual-GCN (SPIN) variant 3.

    Two output branches share a stacked-hourglass trunk: branch 1 predicts
    ``task1_classes`` maps (segmentation) and branch 2 predicts
    ``task2_classes`` maps (orientation bins).  In this variant two SPIN
    graph-reasoning modules are inserted in the *segmentation head only*:
    ``dgcn_seg`` right after the first final deconvolution and
    ``dgcn_seg1`` after the second final ReLU.
    ``forward`` returns ``(out_1, out_2)`` — lists of per-stack score maps
    followed by the decoder-level and full-resolution predictions.
    """
    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCNv3, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: stride-2 7x7 conv halves the spatial resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        # Per-stack heads for branch 1 (task1) and branch 2 (task2);
        # the leading-underscore lists hold the remapping convs that feed
        # intermediate predictions back into the trunk between stacks.
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier, branch 1 (segmentation).
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier, branch 2 (orientation).
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # Dual graph convolutional (SPIN) modules, segmentation head only.
        self.dgcn_seg = spin(planes=32, ratio=2)
        self.dgcn_seg1 = spin(planes=32, ratio=4)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; 1x1 conv downsample when the
        stride or the channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> ReLU feature-mixing head.

        NOTE(review): ``self.relu`` is shared across all heads (same
        in-place ReLU module reused), matching the other variants here.
        """
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Run the trunk, collect per-stack scores, then both final heads.

        Returns ``(out_1, out_2)``: per-stack score maps cropped to
        ceil(rows/4) x ceil(cols/4), plus decoder-level (ceil/2) and
        full-resolution predictions appended at the end of each list.
        """
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x) #64 x 128 x 128
        x = self.layer1(x)
        x = self.maxpool(x) #64 x 64 x 64
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Crop to quarter resolution (ceil handles odd input sizes).
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed intermediate features and predictions back into the
                # trunk before the next hourglass stack.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        # Dual graph reasoning (SPIN) after the first deconv.
        f2 = self.dgcn_seg(f2)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        # Dual graph reasoning (SPIN) after the second ReLU.
        f4 = self.dgcn_seg1(f4)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final Classification
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHOurGlassNet with Dual GCN ################
################################################################
# Added skip connections
# Added Feature Pyramid
class StackHourglassNetMTL_DGCNv5(nn.Module):
    """Multi-task stacked-hourglass network, SPIN variant 5.

    Adds to the base MTL hourglass net:
    * a skip connection (``skip128``) from the stem features into the
      segmentation decoder output, and
    * a three-level SPIN pyramid in the segmentation head (full, 1/2 and
      1/4 scale), fused bottom-up with bilinear upsampling.

    ``forward`` returns ``(out_1, out_2)`` — lists of per-stack score maps
    followed by the decoder-level and full-resolution predictions.

    Fix over the original: ``forward`` contained debug visualization code
    (``print`` plus an unconditional ``cv2.imwrite`` to a hard-coded
    ``./deepglobe_exp/...`` path) that ran on *every* call, crashed when
    the directory was absent, and has been removed along with dead
    commented-out code.  Model outputs are unchanged.
    """
    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCNv5, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # Projects stem features (inplanes ch) to num_feats ch for the
        # additive skip into the segmentation decoder output.
        self.skip128 = nn.Conv2d(self.inplanes, self.num_feats, kernel_size=3, stride=1, padding=1)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier, branch 1 (segmentation).
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier, branch 2 (orientation).
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # SPIN pyramid: one dual-GCN module per scale.
        self.dgcn_seg1 = spin(planes=32, ratio=2)
        self.dgcn_seg2 = spin(planes=32, ratio=2)
        self.dgcn_seg3 = spin(planes=32, ratio=2)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; 1x1 conv downsample when the
        stride or the channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> ReLU feature-mixing head (shared ReLU module)."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Run the trunk, fuse the SPIN pyramid, return ``(out_1, out_2)``."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # 64 x 128 x 128
        # Stem-level skip, projected to decoder channel count.
        skip128 = self.relu(self.skip128(x))
        x = self.layer1(x)
        x = self.maxpool(x)  # 64 x 64 x 64
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Crop to quarter resolution (ceil handles odd input sizes).
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed intermediate features/predictions back into the trunk.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]  # 128, 128, 128
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        d1 = torch.add(d1, skip128)  # added skip connection here
        f1 = self.finaldeconv1(d1)  # 32, 257, 257
        f2 = self.finalrelu1(f1)
        # SPIN pyramid: reason at three scales, then fuse bottom-up.
        spin257 = self.dgcn_seg1(f2)  # SPIN at full head scale
        f2_128 = self.maxpool(f2)
        spin128 = self.dgcn_seg2(f2_128)  # SPIN at 1/2 scale
        f2_64 = self.maxpool(f2_128)
        spin64 = self.dgcn_seg3(f2_64)  # SPIN at 1/4 scale
        spin64_up = F.interpolate(spin64, size=(f2_128.shape[2], f2_128.shape[3]), mode="bilinear")
        spin128_comb = torch.add(spin64_up, spin128)
        spin128_up = F.interpolate(spin128_comb, size=(spin257.shape[2], spin257.shape[3]), mode="bilinear")
        f2 = torch.add(spin128_up, spin257)
        f3 = self.finalconv2(f2)  # 32, 255, 255
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)  # task1_classes, 256, 256
        out_1.append(f5)
        # Final Classification
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHOurGlassNet with Pyramid SPIN ############
################################################################
# We added SPIN module in segmentation branch Only
# Also we used SPIN pyramid
class StackHourglassNetMTL_SPIN_PYRAMID(nn.Module):
    """Multi-task stacked-hourglass network with a two-level SPIN pyramid.

    SPIN graph reasoning is applied in the segmentation head only, at two
    scales: ``dgcn_seg1`` on the full head resolution and ``dgcn_seg2`` on
    the max-pooled half resolution; the coarse result is bilinearly
    upsampled and added back.  ``forward`` returns ``(out_1, out_2)`` —
    per-stack score maps plus decoder-level and full-resolution outputs
    for the segmentation (task 1) and orientation (task 2) branches.
    """
    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_SPIN_PYRAMID, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: stride-2 7x7 conv halves the spatial resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        # Underscore-prefixed lists hold the convs that remap intermediate
        # features/predictions back into the trunk between stacks.
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier, branch 1 (segmentation).
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier, branch 2 (orientation).
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # SPIN modules, one per pyramid scale.
        self.dgcn_seg1 = spin(planes=32, ratio=2)
        self.dgcn_seg2 = spin(planes=32, ratio=2)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; 1x1 conv downsample when the
        stride or the channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> ReLU feature-mixing head (shared ReLU module)."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Run the trunk, fuse the two-scale SPIN pyramid, return outputs."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Crop to quarter resolution (ceil handles odd input sizes).
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed intermediate features/predictions back into the trunk.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        # Dual graph reasoning: SPIN at two scales, fused by addition.
        spin257 = self.dgcn_seg1(f2)  # SPIN at full head scale
        f2_128 = self.maxpool(f2)
        spin128 = self.dgcn_seg2(f2_128)  # SPIN at 1/2 scale
        spin128_up = F.interpolate(spin128, size=(spin257.shape[2], spin257.shape[3]), mode="bilinear")
        f2 = torch.add(spin128_up, spin257)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        out_1.append(f5)
        # Final Classification
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
################################################################
############### StackHourglassNet with SPIN ####################
################################################################
# We changed the location of the SPIN module to the beginning of the network.
class StackHourglassNetMTL_DGCNv6(nn.Module):
    """Multi-task stacked-hourglass network, SPIN variant 6.

    In this variant a single SPIN module (``SPIN1``, 128 planes) is moved
    to the *beginning* of the network — applied to the trunk features right
    after ``layer3``, before the hourglass stacks.  The final heads contain
    no graph reasoning.  ``forward`` returns ``(out_1, out_2)`` — per-stack
    score maps plus decoder-level and full-resolution outputs for the
    segmentation (task 1) and orientation (task 2) branches.

    NOTE(review): ``skip128`` is constructed but never used in ``forward``
    of this variant — presumably carried over from v5; confirm before
    removing.
    """
    def __init__(
        self,
        task1_classes=2,
        task2_classes=37,
        block=BasicResnetBlock,
        in_channels=3,
        num_stacks=2,
        num_blocks=1,
        hg_num_blocks=3,
        depth=3,
    ):
        super(StackHourglassNetMTL_DGCNv6, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: stride-2 7x7 conv halves the spatial resolution.
        self.conv1 = nn.Conv2d(
            in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True
        )
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # Unused in this variant's forward (see class note).
        self.skip128 = nn.Conv2d(self.inplanes, self.num_feats, kernel_size=3, stride=1, padding=1)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, num_blocks)
        self.layer3 = self._make_residual(block, self.num_feats, num_blocks)
        self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        hg = []
        res_1, fc_1, score_1, _fc_1, _score_1 = [], [], [], [], []
        res_2, fc_2, score_2, _fc_2, _score_2 = [], [], [], [], []
        for i in range(num_stacks):
            hg.append(HourglassModuleMTL(block, hg_num_blocks, self.num_feats, depth))
            res_1.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            res_2.append(self._make_residual(block, self.num_feats, hg_num_blocks))
            fc_1.append(self._make_fc(ch, ch))
            fc_2.append(self._make_fc(ch, ch))
            score_1.append(nn.Conv2d(ch, task1_classes, kernel_size=1, bias=True))
            score_2.append(nn.Conv2d(ch, task2_classes, kernel_size=1, bias=True))
            if i < num_stacks - 1:
                _fc_1.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _fc_2.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
                _score_1.append(nn.Conv2d(task1_classes, ch, kernel_size=1, bias=True))
                _score_2.append(nn.Conv2d(task2_classes, ch, kernel_size=1, bias=True))
        self.hg = nn.ModuleList(hg)
        self.res_1 = nn.ModuleList(res_1)
        self.fc_1 = nn.ModuleList(fc_1)
        self.score_1 = nn.ModuleList(score_1)
        self._fc_1 = nn.ModuleList(_fc_1)
        self._score_1 = nn.ModuleList(_score_1)
        self.res_2 = nn.ModuleList(res_2)
        self.fc_2 = nn.ModuleList(fc_2)
        self.score_2 = nn.ModuleList(score_2)
        self._fc_2 = nn.ModuleList(_fc_2)
        self._score_2 = nn.ModuleList(_score_2)
        # Final classifier, branch 1 (segmentation).
        self.decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.decoder1_score = nn.Conv2d(
            self.inplanes, task1_classes, kernel_size=1, bias=True
        )
        self.finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, task1_classes, 2, padding=1)
        # Final classifier, branch 2 (orientation).
        self.angle_decoder1 = DecoderBlock(self.num_feats, self.inplanes)
        self.angle_decoder1_score = nn.Conv2d(
            self.inplanes, task2_classes, kernel_size=1, bias=True
        )
        self.angle_finaldeconv1 = nn.ConvTranspose2d(self.inplanes, 32, 3, stride=2)
        self.angle_finalrelu1 = nn.ReLU(inplace=True)
        self.angle_finalconv2 = nn.Conv2d(32, 32, 3)
        self.angle_finalrelu2 = nn.ReLU(inplace=True)
        self.angle_finalconv3 = nn.Conv2d(32, task2_classes, 2, padding=1)
        # Dual graph convolutional (SPIN) module on the trunk features.
        self.SPIN1 = spin(planes=128, ratio=2)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; 1x1 conv downsample when the
        stride or the channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=True,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv -> BN -> ReLU feature-mixing head (shared ReLU module)."""
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        """Run SPIN-augmented trunk then both final heads; return outputs."""
        out_1 = []
        out_2 = []
        rows = x.size(2)
        cols = x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x) #64 x 128 x 128
        x = self.layer1(x)
        x = self.maxpool(x) #64 x 64 x 64
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.SPIN1(x) #SPIN Pyramid goes here
        for i in range(self.num_stacks):
            y1, y2 = self.hg[i](x)
            y1, y2 = self.res_1[i](y1), self.res_2[i](y2)
            y1, y2 = self.fc_1[i](y1), self.fc_2[i](y2)
            score1, score2 = self.score_1[i](y1), self.score_2[i](y2)
            # Crop to quarter resolution (ceil handles odd input sizes).
            out_1.append(
                score1[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            out_2.append(
                score2[:, :, : int(math.ceil(rows / 4.0)), : int(math.ceil(cols / 4.0))]
            )
            if i < self.num_stacks - 1:
                # Feed intermediate features/predictions back into the trunk.
                _fc_1, _fc_2 = self._fc_1[i](y1), self._fc_2[i](y2)
                _score_1, _score_2 = self._score_1[i](score1), self._score_2[i](score2)
                x = x + _fc_1 + _score_1 + _fc_2 + _score_2
        # Final Classification
        d1 = self.decoder1(y1)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ] #128,128,128
        d1_score = self.decoder1_score(d1)
        out_1.append(d1_score)
        f1 = self.finaldeconv1(d1) #32, 257,257
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2) #32, 255, 255
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4) #2, 256, 256
        out_1.append(f5)
        # Final Classification
        a_d1 = self.angle_decoder1(y2)[
            :, :, : int(math.ceil(rows / 2.0)), : int(math.ceil(cols / 2.0))
        ]
        a_d1_score = self.angle_decoder1_score(a_d1)
        out_2.append(a_d1_score)
        a_f1 = self.angle_finaldeconv1(a_d1)
        a_f2 = self.angle_finalrelu1(a_f1)
        a_f3 = self.angle_finalconv2(a_f2)
        a_f4 = self.angle_finalrelu2(a_f3)
        a_f5 = self.angle_finalconv3(a_f4)
        out_2.append(a_f5)
        return out_1, out_2
| 38.45809
| 134
| 0.576816
| 13,377
| 98,645
| 4.023996
| 0.023174
| 0.049936
| 0.026974
| 0.032603
| 0.933549
| 0.928069
| 0.924799
| 0.922885
| 0.919579
| 0.915083
| 0
| 0.059957
| 0.289878
| 98,645
| 2,565
| 135
| 38.45809
| 0.70848
| 0.045314
| 0
| 0.861328
| 0
| 0
| 0.000712
| 0.000453
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029785
| false
| 0
| 0.004883
| 0.000488
| 0.064941
| 0.000977
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c1de659eaf5f759cbca0534e62f04ca38ae1021
| 11,050
|
py
|
Python
|
status-api/tests/unit/services/test_status.py
|
countable/sbc-auth
|
6db2dae8f3da0cc77eee127d1e0de449d7228946
|
[
"Apache-2.0"
] | null | null | null |
status-api/tests/unit/services/test_status.py
|
countable/sbc-auth
|
6db2dae8f3da0cc77eee127d1e0de449d7228946
|
[
"Apache-2.0"
] | 2
|
2019-03-25T18:42:13.000Z
|
2020-01-13T05:48:53.000Z
|
status-api/tests/unit/services/test_status.py
|
countable/sbc-auth
|
6db2dae8f3da0cc77eee127d1e0de449d7228946
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the Status check service layer.
Test-Suite to ensure that the Status Service layer is working as expected.
"""
from unittest.mock import patch
import arrow
from status_api.services.status import Status as StatusService
def test_check_status(app):
    """Assert check_status handles known, missing and unknown services."""
    with app.app_context():
        def assert_unavailable(resp, expected_service):
            # Shared shape for every "no usable schedule" response.
            assert resp is not None
            assert resp['service'] == expected_service
            assert resp['current_status'] == 'None'
            assert resp['next_up_time'] == 0

        known = 'PAYBC'
        # Known service with a valid check date.
        result = StatusService.check_status(service_name=known, check_date=arrow.Arrow.utcnow())
        assert result is not None
        assert result['service'] == known
        # Missing service name.
        result = StatusService.check_status(service_name=None, check_date=arrow.Arrow.utcnow())
        assert_unavailable(result, None)
        assert result['service'] is None
        # Missing check date.
        result = StatusService.check_status(service_name=known, check_date=None)
        assert_unavailable(result, known)
        # Unknown service name.
        unknown = 'PAYBC1'
        result = StatusService.check_status(service_name=unknown, check_date=arrow.Arrow.utcnow())
        assert_unavailable(result, unknown)
def test_check_status_without_schedule(app):
    """Assert that the function returns the correct response without schedule setup.

    Fix: the original called ``mock_get.stop()`` — a no-op auto-attribute on
    the MagicMock — instead of stopping the *patcher*, so the patch leaked
    into every subsequent test.  Using ``patch`` as a context manager
    guarantees the patch is undone.
    """
    with app.app_context():
        service_name = 'PAYBC'
        # Without any service schedule setup.
        with patch('status_api.services.status.Status.get_schedules') as mock_get:
            mock_get.return_value = None
            response = StatusService().check_status(service_name=service_name, check_date=arrow.Arrow.utcnow())
        assert response is not None
        assert response['service'] == service_name
        assert response['current_status'] == 'None'
        assert response['next_up_time'] == 0
        # Without outage/available entries in the schedule, expect no outage.
        with patch('status_api.services.status.Status.get_schedules') as mock_get:
            mock_get.return_value = [{'service_name': 'PAYBC'}]
            response = StatusService().check_status(service_name=service_name, check_date=arrow.Arrow.utcnow())
        assert response is not None
        assert response['service'] == service_name
        assert response['current_status'] == 'True'
        assert response['next_up_time'] == 0
def test_get_outage_schedule(app):
    """Assert that the get outage schedule function returns the correct response."""
    weekly_hours = [
        {'dayofweek': '1', 'from': '6:00', 'to': '21:00'},
        {'dayofweek': '2', 'from': '6:00', 'to': '21:00'},
        {'dayofweek': '3', 'from': '6:00', 'to': '21:00'},
        {'dayofweek': '4', 'from': '15:05', 'to': '21:00'},
        {'dayofweek': '5', 'from': '6:00', 'to': '21:00'},
        {'dayofweek': '6', 'from': '6:30', 'to': '21:00'},
        {'dayofweek': '7', 'from': '6:30', 'to': '21:00'}
    ]
    outages = [
        {'start': '2019-10-23 16:00', 'end': '2019-10-23 18:00'},
        {'start': '2019-10-23 16:00', 'end': '2019-10-23 17:00'},
        {'start': '2019-10-23 13:00', 'end': '2019-10-25 15:05'}
    ]
    with app.app_context():
        # Schedule with both availability and outage windows.
        parsed = StatusService().get_outage_schedules([{'available': weekly_hours, 'outage': outages}])
        assert parsed is not None
        first_start = arrow.get('2019-10-23 16:00', 'YYYY-MM-DD HH:mm').replace(tzinfo='US/Pacific')
        last_end = arrow.get('2019-10-25 15:05', 'YYYY-MM-DD HH:mm').replace(tzinfo='US/Pacific')
        assert parsed[0][0] == first_start
        assert parsed[2][1] == last_end
        # Schedule with no outage section yields an empty result.
        parsed = StatusService().get_outage_schedules([{'available': weekly_hours}])
        assert parsed is not None
        assert len(parsed) == 0
def test_get_available_schedule(app):
    """Assert that the get available schedule function returns the correct response."""
    # Schedule with both weekly availability windows and outage windows.
    schedule_json = [
        {
            'available': [
                {'dayofweek': '1', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '2', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '3', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '4', 'from': '15:05', 'to': '21:00'},
                {'dayofweek': '5', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '6', 'from': '6:30', 'to': '21:00'},
                {'dayofweek': '7', 'from': '6:30', 'to': '21:00'}
            ],
            'outage': [
                {'start': '2019-10-23 16:00', 'end': '2019-10-23 18:00'},
                {'start': '2019-10-23 16:00', 'end': '2019-10-23 17:00'},
                {'start': '2019-10-23 13:00', 'end': '2019-10-25 15:05'}
            ]
        }
    ]
    # Check date localized to the schedule's Pacific time zone.
    check_date: arrow.Arrow = arrow.get('2019-10-23 10:00', 'YYYY-MM-DD HH:mm')
    check_date_local: arrow.Arrow = check_date.replace(tzinfo='US/Pacific')
    with app.app_context():
        response = StatusService().get_available_schedules(schedule_json, check_date_local)
        assert response is not None
        # First availability window after the check date.
        assert response[0][0] == arrow.get('2019-10-28 06:00:00', 'YYYY-MM-DD HH:mm:ss').replace(tzinfo='US/Pacific')
        # A schedule with no 'available' section yields an empty result.
        schedule_json = [
            {
                'outage': [
                    {'start': '2019-10-23 16:00', 'end': '2019-10-23 18:00'},
                    {'start': '2019-10-23 16:00', 'end': '2019-10-23 17:00'},
                    {'start': '2019-10-23 13:00', 'end': '2019-10-25 15:05'}
                ]
            }
        ]
        response = StatusService().get_available_schedules(schedule_json, check_date_local)
        assert response is not None
        assert len(response) == 0
def test_check_status_no_outage(app):
    """Assert the service reports available outside any outage window.

    Fix: the original called ``mock_get.stop()`` — a no-op auto-attribute on
    the MagicMock — instead of stopping the *patcher*, leaking the patch
    into later tests.  ``patch`` as a context manager undoes it reliably.
    """
    schedule_json = [
        {
            'available': [
                {'dayofweek': '1', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '2', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '3', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '4', 'from': '15:05', 'to': '21:00'},
                {'dayofweek': '5', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '6', 'from': '6:30', 'to': '21:00'},
                {'dayofweek': '7', 'from': '6:30', 'to': '21:00'}
            ],
            'outage': [
                {'start': '2019-10-23 10:00', 'end': '2019-10-23 18:00'},
                {'start': '2019-10-24 13:00', 'end': '2019-10-25 15:05'}
            ]
        }
    ]
    with app.app_context():
        service_name = 'PAYBC'
        # Localtime: Sunday, November 24, 2019 10:00:00 AM GMT-08:00
        check_date: arrow.Arrow = arrow.get('2019-11-24 18:00', 'YYYY-MM-DD HH:mm')
        with patch('status_api.services.status.Status.get_schedules') as mock_get:
            mock_get.return_value = schedule_json
            response = StatusService().check_status(service_name=service_name, check_date=check_date)
        # No outage window matches, so the available schedule applies.
        assert response is not None
        assert response['service'] == service_name
        assert response['current_status'] == 'True'
def test_check_status_with_outage(app):
    """Assert the service is reported unavailable inside either outage window.

    Two check dates are probed: one inside the first outage window and one
    inside the second; both must yield ``current_status == 'False'``.
    """
    schedule_json = [
        {
            'available': [
                {'dayofweek': '1', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '2', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '3', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '4', 'from': '15:05', 'to': '21:00'},
                {'dayofweek': '5', 'from': '6:00', 'to': '21:00'},
                {'dayofweek': '6', 'from': '6:30', 'to': '21:00'},
                {'dayofweek': '7', 'from': '6:30', 'to': '21:00'}
            ],
            'outage': [
                {'start': '2019-10-23 10:00', 'end': '2019-10-23 18:00'},
                {'start': '2019-10-24 13:00', 'end': '2019-10-25 15:05'}
            ]
        }
    ]
    with app.app_context():
        service_name = 'PAYBC'
        # BUG FIX: the original called ``.stop()`` on the MagicMock returned by
        # ``patcher.start()`` (a no-op) and then started a second, redundant
        # patcher that was never stopped either.  One context-managed patch
        # covers both probes and is reliably undone.
        with patch('status_api.services.status.Status.get_schedules') as mock_get:
            mock_get.return_value = schedule_json
            # Localtime: Wednesday, October 23, 2019 3:00:00 PM GMT-07:00 — first outage window
            check_date: arrow.Arrow = arrow.get('2019-10-23 22:00', 'YYYY-MM-DD HH:mm')
            response = StatusService().check_status(service_name=service_name, check_date=check_date)
            # match outage windows
            assert response is not None
            assert response['service'] == service_name
            assert response['current_status'] == 'False'
            # Localtime: Friday, October 25, 2019 5:00:00 AM GMT-07:00 — second outage window
            check_date = arrow.get('2019-10-25 12:00', 'YYYY-MM-DD HH:mm')
            response = StatusService().check_status(service_name=service_name, check_date=check_date)
            # match outage windows
            assert response is not None
            assert response['service'] == service_name
            assert response['current_status'] == 'False'
| 41.385768
| 117
| 0.561538
| 1,383
| 11,050
| 4.355025
| 0.122921
| 0.088328
| 0.034866
| 0.074714
| 0.808235
| 0.794621
| 0.76789
| 0.747468
| 0.735514
| 0.718081
| 0
| 0.093606
| 0.273937
| 11,050
| 266
| 118
| 41.541353
| 0.656986
| 0.127059
| 0
| 0.755208
| 0
| 0
| 0.239024
| 0.024507
| 0
| 0
| 0
| 0
| 0.208333
| 1
| 0.03125
| false
| 0
| 0.015625
| 0
| 0.046875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13e5760baa8c27f18a57259a1ed5c665115ebd45
| 1,815
|
py
|
Python
|
main/migrations/0010_auto_20190719_0617.py
|
gda2048/thefirst
|
f0a74c0a53d507297c58eb267152f6b17339ac02
|
[
"Apache-2.0"
] | 5
|
2019-08-19T14:49:29.000Z
|
2019-12-19T19:03:54.000Z
|
main/migrations/0010_auto_20190719_0617.py
|
Sirkirill/PhychoBlog
|
f0a74c0a53d507297c58eb267152f6b17339ac02
|
[
"Apache-2.0"
] | 10
|
2020-02-12T00:46:12.000Z
|
2022-02-10T09:16:47.000Z
|
main/migrations/0010_auto_20190719_0617.py
|
Sirkirill/PhychoBlog
|
f0a74c0a53d507297c58eb267152f6b17339ac02
|
[
"Apache-2.0"
] | 1
|
2019-10-10T13:04:11.000Z
|
2019-10-10T13:04:11.000Z
|
# Generated by Django 2.2.3 on 2019-07-19 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add image support to Event and Person: photo with tracked width/height, plus alt text."""

    dependencies = [
        ('main', '0009_auto_20190718_1438'),
    ]
    operations = [
        # Event: alt text for the image (verbose_name 'Описание' = "Description").
        migrations.AddField(
            model_name='event',
            name='alt',
            field=models.CharField(blank=True, max_length=200, verbose_name='Описание'),
        ),
        # Event: pixel dimensions auto-populated by the ImageField below via
        # height_field/width_field, hence nullable.
        migrations.AddField(
            model_name='event',
            name='height',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # Event: the image itself (verbose_name 'Изображение' = "Image").
        migrations.AddField(
            model_name='event',
            name='photo',
            field=models.ImageField(height_field='height', null=True, upload_to='', verbose_name='Изображение',
                                    width_field='width'),
        ),
        migrations.AddField(
            model_name='event',
            name='width',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # Person: same alt/height/width additions as Event above.
        migrations.AddField(
            model_name='person',
            name='alt',
            field=models.CharField(blank=True, max_length=200, verbose_name='Описание'),
        ),
        migrations.AddField(
            model_name='person',
            name='height',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='person',
            name='width',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # Person already had a photo field; rewire it to the new height/width columns.
        migrations.AlterField(
            model_name='person',
            name='photo',
            field=models.ImageField(height_field='height', null=True, upload_to='', verbose_name='Изображение',
                                    width_field='width'),
        ),
    ]
| 33
| 111
| 0.54876
| 167
| 1,815
| 5.826347
| 0.299401
| 0.073998
| 0.165468
| 0.194245
| 0.823227
| 0.823227
| 0.741007
| 0.741007
| 0.741007
| 0.741007
| 0
| 0.030378
| 0.328926
| 1,815
| 54
| 112
| 33.611111
| 0.768473
| 0.024793
| 0
| 0.836735
| 1
| 0
| 0.095588
| 0.013009
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.081633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b9a65e0b706840e772bf6b1a4890b9d06e263412
| 9,900
|
py
|
Python
|
tests/integration/test_file_formatting.py
|
MaciejPatro/cmake-tidy
|
ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a
|
[
"MIT"
] | 16
|
2020-05-16T17:20:00.000Z
|
2022-02-14T12:08:41.000Z
|
tests/integration/test_file_formatting.py
|
MaciejPatro/cmake-tidy
|
ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a
|
[
"MIT"
] | 19
|
2020-05-18T06:17:42.000Z
|
2020-08-11T07:15:11.000Z
|
tests/integration/test_file_formatting.py
|
MaciejPatro/cmake-tidy
|
ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright Maciej Patro (maciej.patro@gmail.com)
# MIT License
###############################################################################
from unittest import mock
from approvaltests.approvals import verify
from io import StringIO
from cmake_tidy.formatting import SettingsReader
from tests.integration.test_integration_base import TestIntegrationBase
from tests.integration.utils import execute_cmake_tidy, normalize, get_input_file
class TestFileFormatting(TestIntegrationBase):
    """Approval-style integration tests for `cmake-tidy format` stdout output.

    Each test formats a fixture file with (optionally customised) settings,
    captures stdout, normalizes it and verifies it against the approved file.
    """

    def setUp(self):
        super(TestFileFormatting, self).setUp()
        # Fresh default settings per test; individual tests mutate their copy.
        self.fake_settings = SettingsReader.get_default_format_settings()

    def format_file(self, file: str):
        """Run `format` on the given fixture and assert the command succeeded."""
        self.assertSuccess(execute_cmake_tidy(command='format', arguments=[get_input_file(file)]))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_format_command_should_print_file_to_output(self, stdout):
        self.format_file('first_example.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_format_against_newline_violations(self, stdout):
        self.format_file('newlines_violations.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_format_against_newline_violations_with_custom_settings(self, load_settings, stdout):
        self.fake_settings.update({'succeeding_newlines': 4})
        load_settings.return_value = self.fake_settings
        self.format_file('newlines_violations.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_format_tabs_with_spaces_replacement(self, load_settings, stdout):
        self.fake_settings.update({'tabs_as_spaces': True})
        load_settings.return_value = self.fake_settings
        self.format_file('spaces_violations.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_format_bracket_arguments_handling(self, stdout):
        self.format_file('arguments.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_format_indentation_of_basic_invocations(self, stdout):
        self.format_file('indentations.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_format_indentation_when_spaces_after_command_name_are_present(self, load_settings, stdout):
        self.fake_settings.update({
            'space_between_command_and_begin_parentheses': True,
            'tabs_as_spaces': False,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('indentations.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_format_line_splitting(self, load_settings, stdout):
        self.fake_settings.update({
            'wrap_short_invocations_to_single_line': True,
            'line_length': 80,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('line_length_handling.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_formatting_with_tabs(self, load_settings, stdout):
        self.fake_settings.update({'line_length': 80})
        load_settings.return_value = self.fake_settings
        self.format_file('line_length_handling.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_formatting_file_with_multiple_settings(self, load_settings, stdout):
        self.fake_settings.update({
            'keywords': ['GROUP'],
            'wrap_short_invocations_to_single_line': False,
            'keyword_and_single_value_in_one_line': False,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('target_setting.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_formatting_of_install_commands(self, load_settings, stdout):
        self.fake_settings.update({
            'tabs_as_spaces': False,
            'closing_parentheses_in_newline_when_split': True,
            'wrap_short_invocations_to_single_line': True,
            'keyword_and_single_value_in_one_line': True,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('install.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_formatting_complicated_conditions(self, load_settings, stdout):
        self.fake_settings.update({
            'tabs_as_spaces': False,
            'wrap_short_invocations_to_single_line': True,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('complicated_conditions.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_formatting_complicated_conditions_splitting_after_operator(self, load_settings, stdout):
        self.fake_settings.update({
            'tabs_as_spaces': False,
            'wrap_short_invocations_to_single_line': True,
            'condition_splitting_move_and_or_to_newline': False,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('complicated_conditions.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_real_implementation_of_feature_in_cmake(self, load_settings, stdout):
        self.fake_settings.update({
            'tabs_as_spaces': False,
            'line_length': 80,
            'closing_parentheses_in_newline_when_split': True,
            'wrap_short_invocations_to_single_line': True,
            'keyword_and_single_value_in_one_line': True,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('set_of_functions.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_real_implementation_of_feature_in_cmake_split_keywords_and_values(self, load_settings, stdout):
        self.fake_settings.update({
            'tabs_as_spaces': False,
            'line_length': 80,
            'closing_parentheses_in_newline_when_split': False,
            'wrap_short_invocations_to_single_line': False,
            'keyword_and_single_value_in_one_line': False,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('set_of_functions.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_handling_of_single_line_comments_within_different_parts_of_cmake_file(self, load_settings, stdout):
        self.fake_settings.update({
            'wrap_short_invocations_to_single_line': True,
            'keep_property_and_value_in_one_line': True,
            'keyword_and_single_value_in_one_line': True,
            'closing_parentheses_in_newline_when_split': True,
        })
        load_settings.return_value = self.fake_settings
        self.format_file('comments.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('cmake_tidy.commands.format.format_command.try_read_settings')
    def test_condition_formatting_with_different_kinds_of_parentheses(self, load_settings, stdout):
        self.fake_settings.update({'wrap_short_invocations_to_single_line': True})
        load_settings.return_value = self.fake_settings
        self.format_file('conditions_with_parentheses.cmake')
        verify(normalize(stdout.getvalue()), self.reporter)
| 46.919431
| 112
| 0.742424
| 1,203
| 9,900
| 5.719867
| 0.110557
| 0.055806
| 0.111612
| 0.04447
| 0.837814
| 0.827932
| 0.820956
| 0.808167
| 0.793344
| 0.793344
| 0
| 0.001066
| 0.147273
| 9,900
| 210
| 113
| 47.142857
| 0.814025
| 0.00596
| 0
| 0.726115
| 0
| 0
| 0.236467
| 0.188223
| 0
| 0
| 0
| 0
| 0.006369
| 1
| 0.121019
| false
| 0
| 0.038217
| 0
| 0.165605
| 0.006369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9fbe60971985a2f62eefc3b8ca10d6bb64ba772
| 34,688
|
py
|
Python
|
volatility/volatility/plugins/registry/auditpol.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | 2
|
2018-07-16T13:30:40.000Z
|
2018-07-17T12:02:05.000Z
|
volatility/volatility/plugins/registry/auditpol.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | null | null | null |
volatility/volatility/plugins/registry/auditpol.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | null | null | null |
# Volatility
# Copyright (C) 2008-2012 Volatile Systems
# Copyright (C) 2011 Jamie Levy (Gleeda) <jamie@memoryanalysis.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
@author: Jamie Levy (gleeda)
@license: GNU General Public License 2.0 or later
@contact: jamie@memoryanalysis.net
@organization: Volatile Systems
"""
import volatility.plugins.registry.registryapi as registryapi
import volatility.debug as debug
import volatility.utils as utils
import volatility.obj as obj
import volatility.plugins.common as common
import volatility.addrspace as addrspace
from volatility.renderers import TreeGrid
# Windows XP types taken from RegRipper auditpol plugin


def _xp_audit_member(offset):
    """Return one vtype member: an audit setting stored as an unsigned short at *offset*.

    A fresh choices dict is built per call so no two members share state.
    """
    return [offset, ['Enumeration',
                     dict(target='unsigned short',
                          choices={0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]]


# Audit-policy layout for Windows XP: a one-byte enabled flag followed by
# per-category unsigned-short audit settings.
auditpol_type_xp = {
    'AuditPolDataXP': [None, {
        'Enabled': [0x0, ['unsigned char']],
        'System': _xp_audit_member(0x4),
        'Logons': _xp_audit_member(0x8),
        'Files': _xp_audit_member(0xc),
        'UserRights': _xp_audit_member(0x10),
        'Process': _xp_audit_member(0x14),
        'PolicyChange': _xp_audit_member(0x18),
        'AccountManagement': _xp_audit_member(0x1c),
        'DirectoryAccess': _xp_audit_member(0x20),
        'AccountLogon': _xp_audit_member(0x24),
    }],
}
# Vista and Windows 7 structures taken from http://www.kazamiya.net/files/PolAdtEv_Structure_en_rev2.pdf

# Audit categories in on-disk order.  Each occupies one unsigned short, packed
# back-to-back starting at offset 0xc, so the offset of entry i is 0xc + 2*i.
_VISTA_AUDIT_CATEGORIES = (
    # System
    'SecurityState', 'SecuritySystem', 'SystemIntegrity', 'IPSecDriver', 'SystemOther',
    # Logon/Logoff
    'Logon', 'Logoff', 'AccountLockout', 'IPSecMainMode', 'SpecialLogon',
    'IPSecQuickMode', 'IPSecExtended', 'LogonOther', 'NetworkPolicyServer',
    # File Object
    'FileSystem', 'Registry', 'KernelObject', 'SAM', 'ObjectOther', 'Certification',
    'Application', 'HandleManipulation', 'FileShare', 'PacketDrop', 'PlatformConnection',
    # Privelege Use
    'Sensitive', 'NonSensitive', 'PrivilegeOther',
    # Detailed Tracking
    'ProcessCreation', 'ProcessTermination', 'DPAPI', 'RPC',
    # Policy Change
    'AuditPolicyChange', 'AuthenticationPolicyChange', 'AuthorizationPolicyChange',
    'MPSSVCRule', 'FilteringPlatformPolicyChange', 'PolicyOther',
    # Account Management
    'UserAccount', 'ComputerAccount', 'SecurityGroup', 'DistributionGroup',
    'ApplicationGroup', 'AccountOther',
    # DS ACcess
    'DirectoryServiceAccess', 'DirectoryServiceChange', 'DirectoryServiceReplication',
    'DetailedDirServReplication',
    # Account Logon
    'CredentialValidation', 'KerberosOperations', 'AccountLogonOther',
    'KerberosAuthentication',
)

auditpol_type_vista = {
    'AuditPolDataVista': [None, dict(
        (category, [0xc + 2 * index,
                    ['Enumeration',
                     dict(target='unsigned short',
                          choices={0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]])
        for index, category in enumerate(_VISTA_AUDIT_CATEGORIES))],
}
auditpol_type_win7 = {
'AuditPolData7' : [ None, {
# System
'SecurityState' : [ 0xc, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SecuritySystem' : [ 0xe, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SystemIntegrity' : [0x10, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'IPSecDriver': [0x12, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SystemOther': [0x14, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# Logon/Logoff
'Logon': [0x16, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'Logoff': [0x18, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'AccountLockout': [0x1a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'IPSecMainMode': [0x1c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SpecialLogon': [0x1e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'IPSecQuickMode': [0x20, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'IPSecExtended': [0x22, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'LogonOther': [0x24, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'NetworkPolicyServer': [0x26, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# File Object
'FileSystem': [0x28, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'Registry': [0x2a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'KernelObject': [0x2c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SAM': [0x2e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'ObjectOther': [0x30, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'Certification': [0x32, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'Application': [0x34, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'HandleManipulation': [0x36, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'FileShare': [0x38, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'PacketDrop': [0x3a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'PlatformConnection': [0x3c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DetailedFileShare': [0x3e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# Privelege Use
'Sensitive': [0x40, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'NonSensitive': [0x42, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'PrivilegeOther': [0x44, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
#Detailed Tracking
'ProcessCreation': [0x46, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'ProcessTermination': [0x48, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DPAPI': [0x4a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'RPC': [0x4c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# Policy Change
'AuditPolicyChange': [0x4e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'AuthenticationPolicyChange': [0x50, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'AuthorizationPolicyChange': [0x52, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'MPSSVCRule': [0x54, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'FilteringPlatformPolicyChange': [0x56, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'PolicyOther': [0x58, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# Account Management
'UserAccount': [0x5a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'ComputerAccount': [0x5c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'SecurityGroup': [0x5e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DistributionGroup': [0x60, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'ApplicationGroup': [0x62, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'AccountOther': [0x64, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# DS Access
'DirectoryServiceAccess': [0x66, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DirectoryServiceChange': [0x68, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DirectoryServiceReplication': [0x6a, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'DetailedDirServReplication': [0x6c, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
# Account Logon
'CredentialValidation': [0x6e, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'KerberosOperations': [0x70, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'AccountLogonOther': [0x72, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
'KerberosAuthentication': [0x74, ['Enumeration', dict(target = 'unsigned short', choices = {0x00: "Not Logged", 0x01: "S", 0x02: "F", 0x03: "S/F"})]],
} ],
}
def _sf_enum8(offset):
    """Build a vtype entry for a Success/Failure audit flag at *offset*."""
    return [offset, ['Enumeration',
                     dict(target = 'unsigned short',
                          choices = {0x00: "Not Logged", 0x01: "S",
                                     0x02: "F", 0x03: "S/F"})]]

# Windows 8/8.1 PolAdtEv layout: only a subset of categories is decoded.
auditpol_type_win8 = {
    'AuditPolData8': [None, {
        'Logon': _sf_enum8(22),
        'Logoff': _sf_enum8(24),
        'Sensitive': _sf_enum8(70),
        'ProcessCreation': _sf_enum8(76),
    }],
}
def _sf_enum10(offset):
    """Build a vtype entry for a Success/Failure audit flag at *offset*."""
    return [offset, ['Enumeration',
                     dict(target = 'unsigned short',
                          choices = {0x00: "Not Logged", 0x01: "S",
                                     0x02: "F", 0x03: "S/F"})]]

# Windows 10 PolAdtEv layout: only a subset of categories is decoded.
auditpol_type_win10 = {
    'AuditPolData10': [None, {
        'Logon': _sf_enum10(0x16),
        'Logoff': _sf_enum10(0x18),
        'Sensitive': _sf_enum10(0x48),
        'ProcessCreation': _sf_enum10(0x4e),
    }],
}
# These classes render the decoded structures; kept because they are more descriptive.
class AuditPolDataXP(obj.CType):
    """Renders the Windows XP-era audit policy as human-readable text."""
    def __str__(self):
        # A nonzero Enabled field means auditing is switched on globally.
        status = "Enabled" if int(self.Enabled) != 0 else "Disabled"
        parts = [
            "Auditing is {0}".format(status),
            "Audit System Events: {0}".format(self.System),
            "Audit Logon Events: {0}".format(self.Logons),
            "Audit Object Access: {0}".format(self.Files),
            "Audit Privilege Use: {0}".format(self.UserRights),
            "Audit Process Tracking: {0}".format(self.Process),
            "Audit Policy Change: {0}".format(self.PolicyChange),
            "Audit Account Management: {0}".format(self.AccountManagement),
            "Audit Dir Service Access: {0}".format(self.DirectoryAccess),
            "Audit Account Logon Events: {0}".format(self.AccountLogon),
        ]
        # Matches the original layout: items separated by newline+tab,
        # trailing newline at the end.
        return "\n\t".join(parts) + "\n"
class AuditPolDataVista(obj.CType):
    """Renders the Windows Vista audit policy as a human-readable report.

    Each member is an Enumeration decoded from the PolAdtEv registry blob
    ("Not Logged"/"S"/"F"/"S/F"); __str__ groups them by audit category.
    """
    def __str__(self):
        msg = "System Events:\n\tSecurity State Change: {0}\n\tSecurity System Extension: {1}\n\tSystem Integrity: {2}\n\t".format(
            self.SecurityState, self.SecuritySystem, self.SystemIntegrity)
        msg += "IPSec Driver: {0}\n\tOther System Events: {1}\n".format(
            self.IPSecDriver, self.SystemOther)
        msg += "Logon/Logoff Events:\n\tLogon: {0}\n\tLogoff: {1}\n\tAccount Lockout: {2}\n\t".format(
            self.Logon, self.Logoff, self.AccountLockout)
        msg += "IPSec Main Mode: {0}\n\tSpecial Logon: {1}\n\tIPSec Quick Mode: {2}\n\tIPSec Extended Mode: {3}\n\t".format(
            self.IPSecMainMode, self.SpecialLogon, self.IPSecQuickMode, self.IPSecExtended)
        msg += "Other Logon Events: {0}\n\tNetwork Policy Server: {1}\n".format(
            self.LogonOther, self.NetworkPolicyServer)
        msg += "Object Access Events:\n\tFile System: {0}\n\tRegistry: {1}\n\tKernel Object: {2}\n\t".format(
            self.FileSystem, self.Registry, self.KernelObject)
        msg += "SAM: {0}\n\tOther Object Events: {1}\n\tCertification Services: {2}\n\tApplication Generated: {3}\n\t".format(
            self.SAM, self.ObjectOther, self.Certification, self.Application)
        msg += "Handle Manipulation: {0}\n\tFile Share: {1}\n\tFiltering Platform Packet Drop: {2}\n\t".format(
            self.HandleManipulation, self.FileShare, self.PacketDrop)
        msg += "Filtering Platform Connection: {0}\nPrivilege Use:\n\t".format(
            self.PlatformConnection)
        # BUG FIX: the original format string was missing the ": " separator
        # after "Non Sensitive" and "Other Privilege Use Events".
        msg += "Sensitive: {0}\n\tNon Sensitive: {1}\n\tOther Privilege Use Events: {2}\nDetailed Tracking:\n\t".format(
            self.Sensitive, self.NonSensitive, self.PrivilegeOther)
        # BUG FIX: the original passed self.RPC but had no {3} placeholder,
        # so the RPC Events value was silently dropped from the output.
        msg += "Process Creation: {0}\n\tProcess Termination: {1}\n\tDPAPI Activity: {2}\n\tRPC Events: {3}\n".format(
            self.ProcessCreation, self.ProcessTermination, self.DPAPI, self.RPC)
        msg += "Policy Change Events:\n\tAudit Policy Change: {0}\n\tAuthentication Policy Change: {1}\n\t".format(
            self.AuditPolicyChange, self.AuthenticationPolicyChange)
        msg += "Authorization Policy Change: {0}\n\tMPSSVC Rule: {1}\n\tFiltering Platform Policy Change: {2}\n\t".format(
            self.AuthorizationPolicyChange, self.MPSSVCRule, self.FilteringPlatformPolicyChange)
        msg += "Other Policy Events: {0}\nAccount Management Events:\n\tUser Account Management: {1}\n\t".format(
            self.PolicyOther, self.UserAccount)
        msg += "Computer Account Management: {0}\n\tSecurity Group Management: {1}\n\tDistribution Group Management: {2}\n\t".format(
            self.ComputerAccount, self.SecurityGroup, self.DistributionGroup)
        msg += "Application Group Management: {0}\n\tOther Account Management Events: {1}\nDS Access Events:\n\t".format(
            self.ApplicationGroup, self.AccountOther)
        msg += "Directory Service Access: {0}\n\tDirectory Service Changes: {1}\n\tDirectory Service Replication: {2}\n\t".format(
            self.DirectoryServiceAccess, self.DirectoryServiceChange, self.DirectoryServiceReplication)
        msg += "Detailed Directory Service Replication: {0}\nAccount Logon Events:\n\tCredential Validation: {1}\n\t".format(
            self.DetailedDirServReplication, self.CredentialValidation)
        msg += "Kerberos Service Ticket Operations: {0}\n\tOther Account Logon Events: {1}\n\tKerberos Authentication Service: {2}\n".format(
            self.KerberosOperations, self.AccountLogonOther, self.KerberosAuthentication)
        return msg
class AuditPolData8(obj.CType):
    """Renders the Windows 8/8.1 audit policy (only four categories decoded)."""
    def __str__(self):
        # NOTE: the "Privilegs" spelling is preserved from the original output.
        template = "\nLogon: {0}\n\tLogoff: {1}\n\tSensitive Privilegs: {2}\n\tProcess Creation: {3}\n\t"
        return template.format(self.Logon, self.Logoff,
                               self.Sensitive, self.ProcessCreation)
class AuditPolData10(obj.CType):
    """Renders the Windows 10 audit policy (only four categories decoded)."""
    def __str__(self):
        # NOTE: the "Privilegs" spelling is preserved from the original output.
        template = "\nLogon: {0}\n\tLogoff: {1}\n\tSensitive Privilegs: {2}\n\tProcess Creation: {3}\n\t"
        return template.format(self.Logon, self.Logoff,
                               self.Sensitive, self.ProcessCreation)
class AuditPolData7(obj.CType):
    """Renders the Windows 7 audit policy as a human-readable report.

    Same layout as Vista plus the DetailedFileShare category. Each member is
    an Enumeration decoded from PolAdtEv ("Not Logged"/"S"/"F"/"S/F").
    """
    def __str__(self):
        msg = "System Events:\n\tSecurity State Change: {0}\n\tSecurity System Extension: {1}\n\tSystem Integrity: {2}\n\t".format(
            self.SecurityState, self.SecuritySystem, self.SystemIntegrity)
        msg += "IPSec Driver: {0}\n\tOther System Events: {1}\n".format(
            self.IPSecDriver, self.SystemOther)
        msg += "Logon/Logoff Events:\n\tLogon: {0}\n\tLogoff: {1}\n\tAccount Lockout: {2}\n\t".format(
            self.Logon, self.Logoff, self.AccountLockout)
        msg += "IPSec Main Mode: {0}\n\tSpecial Logon: {1}\n\tIPSec Quick Mode: {2}\n\tIPSec Extended Mode: {3}\n\t".format(
            self.IPSecMainMode, self.SpecialLogon, self.IPSecQuickMode, self.IPSecExtended)
        msg += "Other Logon Events: {0}\n\tNetwork Policy Server: {1}\n".format(
            self.LogonOther, self.NetworkPolicyServer)
        msg += "Object Access Events:\n\tFile System: {0}\n\tRegistry: {1}\n\tKernel Object: {2}\n\t".format(
            self.FileSystem, self.Registry, self.KernelObject)
        msg += "SAM: {0}\n\tOther Object Events: {1}\n\tCertification Services: {2}\n\tApplication Generated: {3}\n\t".format(
            self.SAM, self.ObjectOther, self.Certification, self.Application)
        msg += "Handle Manipulation: {0}\n\tFile Share: {1}\n\tFiltering Platform Packet Drop: {2}\n\t".format(
            self.HandleManipulation, self.FileShare, self.PacketDrop)
        msg += "Filtering Platform Connection: {0}\n\tDetailed File Share: {1}\nPrivilege Use:\n\t".format(
            self.PlatformConnection, self.DetailedFileShare)
        # BUG FIX: the original format string was missing the ": " separator
        # after "Non Sensitive" and "Other Privilege Use Events".
        msg += "Sensitive: {0}\n\tNon Sensitive: {1}\n\tOther Privilege Use Events: {2}\nDetailed Tracking:\n\t".format(
            self.Sensitive, self.NonSensitive, self.PrivilegeOther)
        # BUG FIX: the original passed self.RPC but had no {3} placeholder,
        # so the RPC Events value was silently dropped from the output.
        msg += "Process Creation: {0}\n\tProcess Termination: {1}\n\tDPAPI Activity: {2}\n\tRPC Events: {3}\n".format(
            self.ProcessCreation, self.ProcessTermination, self.DPAPI, self.RPC)
        msg += "Policy Change Events:\n\tAudit Policy Change: {0}\n\tAuthentication Policy Change: {1}\n\t".format(
            self.AuditPolicyChange, self.AuthenticationPolicyChange)
        msg += "Authorization Policy Change: {0}\n\tMPSSVC Rule: {1}\n\tFiltering Platform Policy Change: {2}\n\t".format(
            self.AuthorizationPolicyChange, self.MPSSVCRule, self.FilteringPlatformPolicyChange)
        msg += "Other Policy Events: {0}\nAccount Management Events:\n\tUser Account Management: {1}\n\t".format(
            self.PolicyOther, self.UserAccount)
        msg += "Computer Account Management: {0}\n\tSecurity Group Management: {1}\n\tDistribution Group Management: {2}\n\t".format(
            self.ComputerAccount, self.SecurityGroup, self.DistributionGroup)
        msg += "Application Group Management: {0}\n\tOther Account Management Events: {1}\nDS Access Events:\n\t".format(
            self.ApplicationGroup, self.AccountOther)
        msg += "Directory Service Access: {0}\n\tDirectory Service Changes: {1}\n\tDirectory Service Replication: {2}\n\t".format(
            self.DirectoryServiceAccess, self.DirectoryServiceChange, self.DirectoryServiceReplication)
        msg += "Detailed Directory Service Replication: {0}\nAccount Logon Events:\n\tCredential Validation: {1}\n\t".format(
            self.DetailedDirServReplication, self.CredentialValidation)
        msg += "Kerberos Service Ticket Operations: {0}\n\tOther Account Logon Events: {1}\n\tKerberos Authentication Service: {2}\n".format(
            self.KerberosOperations, self.AccountLogonOther, self.KerberosAuthentication)
        return msg
class AuditpolTypesXP(obj.ProfileModification):
    """Installs the XP audit-policy vtype and object class into 5.0/5.1 profiles."""
    before = ['WindowsObjectClasses']
    conditions = {
        'os': lambda x: x == 'windows',
        'major': lambda x: x == 5,
        'minor': lambda x: x <= 1,
    }

    def modification(self, profile):
        profile.vtypes.update(auditpol_type_xp)
        profile.object_classes.update({'AuditPolDataXP': AuditPolDataXP})
class AuditpolTypesVista(obj.ProfileModification):
    """Installs the Vista audit-policy vtype and object class into 6.0 profiles."""
    before = ['WindowsObjectClasses']
    conditions = {
        'os': lambda x: x == 'windows',
        'major': lambda x: x == 6,
        'minor': lambda x: x == 0,
    }

    def modification(self, profile):
        profile.vtypes.update(auditpol_type_vista)
        profile.object_classes.update({'AuditPolDataVista': AuditPolDataVista})
# NOTE: class name misspelled ("Audipol") in the original; kept for compatibility.
class AudipolWin7(obj.ProfileModification):
    """Installs the Windows 7 audit-policy vtype and object class into 6.1 profiles."""
    before = ['WindowsObjectClasses']
    conditions = {
        'os': lambda x: x == 'windows',
        'major': lambda x: x == 6,
        'minor': lambda x: x == 1,
    }

    def modification(self, profile):
        profile.vtypes.update(auditpol_type_win7)
        profile.object_classes.update({'AuditPolData7': AuditPolData7})
# NOTE: class name misspelled ("Audipol") in the original; kept for compatibility.
class AudipolWin8(obj.ProfileModification):
    """Installs the Windows 8/8.1 audit-policy vtype and object class (6.2/6.3)."""
    before = ['WindowsObjectClasses']
    conditions = {
        'os': lambda x: x == 'windows',
        'major': lambda x: x == 6,
        'minor': lambda x: x in (2, 3),
    }

    def modification(self, profile):
        profile.vtypes.update(auditpol_type_win8)
        profile.object_classes.update({'AuditPolData8': AuditPolData8})
# NOTE: class name misspelled ("Audipol") in the original; kept for compatibility.
class AudipolWin10(obj.ProfileModification):
    """Installs the Windows 10 audit-policy vtype and object class (6.4+)."""
    before = ['WindowsObjectClasses']
    conditions = {
        'os': lambda x: x == 'windows',
        'major': lambda x: x == 6,
        'minor': lambda x: x >= 4,
    }

    def modification(self, profile):
        profile.vtypes.update(auditpol_type_win10)
        profile.object_classes.update({'AuditPolData10': AuditPolData10})
class Auditpol(common.AbstractWindowsCommand):
    """Prints out the Audit Policies from HKLM\\SECURITY\\Policy\\PolAdtEv"""

    def __init__(self, config, *args, **kwargs):
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
        # -H/--HEX: additionally dump the raw bytes of the PolAdtEv value.
        config.add_option('HEX', short_option = 'H', default = False,
                          help = 'Output HEX of Policy\\PolAdtEv key',
                          action = "store_true")

    @staticmethod
    def is_valid_profile(profile):
        # The plugin applies to any Windows profile; version-specific
        # structure selection happens later in calculate().
        return profile.metadata.get('os', 'unknown').lower() == 'windows'

    def get_yield(self, ap):
        # Yield one (member-name, decoded-value) row per audit-policy field.
        for k in ap.members.keys():
            yield (0, ["{0}".format(k), "{0}".format(ap.m(k))])

    def calculate(self):
        """Locate the PolAdtEv value in the SECURITY hive and overlay the
        OS-version-specific AuditPolData* structure on its raw bytes.

        Yields (raw_data, parsed_object) pairs.
        """
        addr_space = utils.load_as(self._config)
        regapi = registryapi.RegistryApi(self._config)
        regapi.reset_current()
        # (major, minor) of the profiled OS drives structure selection below.
        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        for value, data_raw in regapi.reg_yield_values('security', 'Policy\\PolAdtEv', thetype = 'REG_NONE'):
            # Wrap the raw registry data so it can be parsed like memory.
            bufferas = addrspace.BufferAddressSpace(self._config, data = data_raw)
            if version <= (5, 1):
                ap = obj.Object("AuditPolDataXP", offset = 0, vm = bufferas)
            elif version <= (6, 0):
                ap = obj.Object("AuditPolDataVista", offset = 0, vm = bufferas)
            elif version == (6, 1):
                ap = obj.Object("AuditPolData7", offset = 0, vm = bufferas)
            elif version == (6, 2) or version == (6, 3):
                ap = obj.Object("AuditPolData8", offset = 0, vm = bufferas)
            else:
                # Anything newer than 6.3 is treated as Windows 10.
                ap = obj.Object("AuditPolData10", offset = 0, vm = bufferas)
            if ap == None:
                debug.error("No AuditPol data found")
            yield data_raw, ap

    def unified_output(self, data):
        # Two-column grid: member name / decoded audit setting.
        return TreeGrid([("Item", str),
                         ("Detail", str)],
                        self.generator(data))

    def generator(self, data):
        first = True
        for data_raw, ap in data:
            # Only the XP-era structure carries a global Enabled flag;
            # report it once as a "GeneralAuditing" row.
            if first and hasattr(ap, "Enabled"):
                first = False
                audit = "Disabled"
                if int(ap.Enabled) != 0:
                    audit = "Enabled"
                yield (0, ["GeneralAuditing", audit])
            for k in ap.members.keys():
                if k != "Enabled":
                    yield (0, ["{0}".format(k), "{0}".format(ap.m(k))])
            if self._config.HEX:
                # for now, not sure how to handle hexdump data
                raw = "\n".join(["{0:010x}: {1:<48} {2}".format(o, h, ''.join(c)) for o, h, c in utils.Hexdump(data_raw)])
                print raw

    def render_text(self, outfd, data):
        # Text renderer: optional hexdump followed by the structure's __str__.
        for data_raw, ap in data:
            if self._config.HEX:
                raw = "\n".join(["{0:010x}: {1:<48} {2}".format(o, h, ''.join(c)) for o, h, c in utils.Hexdump(data_raw)])
                outfd.write(raw + "\n\n")
            outfd.write("{0}\n".format(str(ap)))
| 77.60179
| 165
| 0.599977
| 4,079
| 34,688
| 5.081638
| 0.116205
| 0.088286
| 0.123601
| 0.170687
| 0.84176
| 0.825164
| 0.820629
| 0.810353
| 0.802827
| 0.802827
| 0
| 0.073438
| 0.209784
| 34,688
| 446
| 166
| 77.775785
| 0.682755
| 0.037535
| 0
| 0.515235
| 0
| 0.102493
| 0.335398
| 0.015829
| 0
| 0
| 0.073103
| 0
| 0
| 0
| null | null | 0
| 0.019391
| null | null | 0.00277
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6a21038bd46dd3f7a89c41365a5c1926dd505f28
| 1,987
|
py
|
Python
|
test/pyaz/provider/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/provider/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/provider/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from .. pyaz_utils import get_cli_name, get_params
def list(__TOP=None, expand=None):
    """Run ``az provider list`` and return its parsed JSON output.

    NOTE: shadows the builtin ``list``; the name mirrors the az subcommand
    and is kept for compatibility with the generated CLI mapping.

    Raises:
        Exception: carrying the CLI's stderr when no stdout was produced.
    """
    params = get_params(locals())
    command = "az provider list " + params
    print(command)
    # SECURITY: shell=True with a string-built command — params come from the
    # caller's arguments; do not pass untrusted input here.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: removed unreachable print() calls that followed the
    # return/raise statements in the original generated code.
    raise Exception(stderr)
def show(namespace, expand=None):
    """Run ``az provider show`` for *namespace* and return its parsed JSON output.

    Raises:
        Exception: carrying the CLI's stderr when no stdout was produced.
    """
    params = get_params(locals())
    command = "az provider show " + params
    print(command)
    # SECURITY: shell=True with a string-built command — params come from the
    # caller's arguments; do not pass untrusted input here.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: removed unreachable print() calls that followed the
    # return/raise statements in the original generated code.
    raise Exception(stderr)
def register(namespace, consent_to_permissions=None, management_group_id=None, wait=None, accept_terms=None):
    """Run ``az provider register`` for *namespace* and return its parsed JSON output.

    Raises:
        Exception: carrying the CLI's stderr when no stdout was produced.
    """
    params = get_params(locals())
    command = "az provider register " + params
    print(command)
    # SECURITY: shell=True with a string-built command — params come from the
    # caller's arguments; do not pass untrusted input here.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: removed unreachable print() calls that followed the
    # return/raise statements in the original generated code.
    raise Exception(stderr)
def unregister(namespace, wait=None):
    """Run ``az provider unregister`` for *namespace* and return its parsed JSON output.

    Raises:
        Exception: carrying the CLI's stderr when no stdout was produced.
    """
    params = get_params(locals())
    command = "az provider unregister " + params
    print(command)
    # SECURITY: shell=True with a string-built command — params come from the
    # caller's arguments; do not pass untrusted input here.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: removed unreachable print() calls that followed the
    # return/raise statements in the original generated code.
    raise Exception(stderr)
| 33.116667
| 109
| 0.661802
| 240
| 1,987
| 5.416667
| 0.204167
| 0.086154
| 0.061538
| 0.058462
| 0.834615
| 0.834615
| 0.834615
| 0.834615
| 0.77
| 0.696154
| 0
| 0.005175
| 0.221943
| 1,987
| 59
| 110
| 33.677966
| 0.835705
| 0
| 0
| 0.814815
| 0
| 0
| 0.059386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a2231c76d2204ab68909cbad2a79e474e00bcd2
| 8,096
|
py
|
Python
|
FaceDetection/Yolo_support/yolo3/models/yolo3_mobilenetv3_small.py
|
nileshpd1211/SRIP-UCSD---Efficient-Deep-Networks
|
7b75f639e3f6dfed60a4b24915796d0b59eafa85
|
[
"MIT"
] | 1
|
2020-05-06T04:04:19.000Z
|
2020-05-06T04:04:19.000Z
|
yolo3/models/yolo3_mobilenetv3_small.py
|
JsonSadler/keras-YOLOv3-model-set
|
378bee8853bc2c547f379f707098a39025a0ff83
|
[
"MIT"
] | null | null | null |
yolo3/models/yolo3_mobilenetv3_small.py
|
JsonSadler/keras-YOLOv3-model-set
|
378bee8853bc2c547f379f707098a39025a0ff83
|
[
"MIT"
] | 1
|
2021-03-10T09:02:27.000Z
|
2021-03-10T09:02:27.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 MobileNetV3Small Model Defined in Keras."""
from tensorflow.keras.layers import UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from common.backbones.mobilenet_v3 import MobileNetV3Small
from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, make_last_layers, make_depthwise_separable_last_layers, make_spp_depthwise_separable_last_layers
def yolo3_mobilenetv3small_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V3 MobileNetV3Small model CNN body in Keras."""
    backbone = MobileNetV3Small(input_tensor=inputs, weights='imagenet',
                                include_top=False, alpha=alpha)

    # Feature maps are fetched by layer *index* because activation layer
    # names differ between TF 1.x and 2.x. For a 416x416x3 input:
    #   layer 165 -> 13x13x(576*alpha), layer 117 -> 26x26x(288*alpha),
    #   layer  38 -> 52x52x(96*alpha)
    out_filters = num_anchors * (num_classes + 5)

    feat13 = backbone.layers[165].output
    x, y1 = make_last_layers(feat13, int(288 * alpha), out_filters)

    x = compose(
        DarknetConv2D_BN_Leaky(int(144 * alpha), (1, 1)),
        UpSampling2D(2))(x)
    feat26 = backbone.layers[117].output
    x = Concatenate()([x, feat26])
    x, y2 = make_last_layers(x, int(96 * alpha), out_filters)

    x = compose(
        DarknetConv2D_BN_Leaky(int(48 * alpha), (1, 1)),
        UpSampling2D(2))(x)
    feat52 = backbone.layers[38].output
    x = Concatenate()([x, feat52])
    x, y3 = make_last_layers(x, int(48 * alpha), out_filters)

    return Model(inputs=inputs, outputs=[y1, y2, y3])
def yolo3lite_mobilenetv3small_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v3 Lite MobileNetV3Small model CNN body in keras.'''
    backbone = MobileNetV3Small(input_tensor=inputs, weights='imagenet',
                                include_top=False, alpha=alpha)

    # Feature maps are fetched by layer *index* because activation layer
    # names differ between TF 1.x and 2.x. For a 416x416x3 input:
    #   layer 165 -> 13x13x(576*alpha), layer 117 -> 26x26x(288*alpha),
    #   layer  38 -> 52x52x(96*alpha)
    out_filters = num_anchors * (num_classes + 5)

    feat13 = backbone.layers[165].output
    x, y1 = make_depthwise_separable_last_layers(feat13, int(288 * alpha), out_filters)

    x = compose(
        DarknetConv2D_BN_Leaky(int(144 * alpha), (1, 1)),
        UpSampling2D(2))(x)
    feat26 = backbone.layers[117].output
    x = Concatenate()([x, feat26])
    x, y2 = make_depthwise_separable_last_layers(x, int(96 * alpha), out_filters)

    x = compose(
        DarknetConv2D_BN_Leaky(int(48 * alpha), (1, 1)),
        UpSampling2D(2))(x)
    feat52 = backbone.layers[38].output
    x = Concatenate()([x, feat52])
    x, y3 = make_depthwise_separable_last_layers(x, int(48 * alpha), out_filters)

    return Model(inputs=inputs, outputs=[y1, y2, y3])
def tiny_yolo3_mobilenetv3small_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create Tiny YOLO_v3 MobileNetV3Small model CNN body in keras.'''
    backbone = MobileNetV3Small(input_tensor=inputs, weights='imagenet',
                                include_top=False, alpha=alpha)

    # Feature maps are fetched by layer *index* because activation layer
    # names differ between TF 1.x and 2.x. For a 416x416x3 input:
    #   layer 165 -> 13x13x(576*alpha), layer 117 -> 26x26x(288*alpha)
    out_filters = num_anchors * (num_classes + 5)

    feat26 = backbone.layers[117].output
    feat13 = backbone.layers[165].output

    feat13 = DarknetConv2D_BN_Leaky(int(288 * alpha), (1, 1))(feat13)
    y1 = compose(
        DarknetConv2D_BN_Leaky(int(576 * alpha), (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))(feat13)

    feat13 = compose(
        DarknetConv2D_BN_Leaky(int(144 * alpha), (1, 1)),
        UpSampling2D(2))(feat13)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(int(288 * alpha), (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))([feat13, feat26])

    return Model(inputs, [y1, y2])
def tiny_yolo3lite_mobilenetv3small_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create Tiny YOLO_v3 Lite MobileNetV3Small model CNN body in keras.'''
    backbone = MobileNetV3Small(input_tensor=inputs, weights='imagenet',
                                include_top=False, alpha=alpha)

    # Feature maps are fetched by layer *index* because activation layer
    # names differ between TF 1.x and 2.x. For a 416x416x3 input:
    #   layer 165 -> 13x13x(576*alpha), layer 117 -> 26x26x(288*alpha)
    out_filters = num_anchors * (num_classes + 5)

    feat26 = backbone.layers[117].output
    feat13 = backbone.layers[165].output

    # "Lite" variant: depthwise-separable convolutions replace the plain
    # 3x3 Darknet convolutions of the non-lite tiny body.
    feat13 = DarknetConv2D_BN_Leaky(int(288 * alpha), (1, 1))(feat13)
    y1 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(576 * alpha), kernel_size=(3, 3), block_id_str='15'),
        DarknetConv2D(out_filters, (1, 1)))(feat13)

    feat13 = compose(
        DarknetConv2D_BN_Leaky(int(144 * alpha), (1, 1)),
        UpSampling2D(2))(feat13)
    y2 = compose(
        Concatenate(),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(288 * alpha), kernel_size=(3, 3), block_id_str='16'),
        DarknetConv2D(out_filters, (1, 1)))([feat13, feat26])

    return Model(inputs, [y1, y2])
| 45.483146
| 214
| 0.665884
| 1,223
| 8,096
| 4.235487
| 0.107931
| 0.03861
| 0.050193
| 0.07722
| 0.934556
| 0.922394
| 0.922394
| 0.922394
| 0.922394
| 0.922394
| 0
| 0.107093
| 0.207633
| 8,096
| 177
| 215
| 45.740113
| 0.70039
| 0.436018
| 0
| 0.742857
| 0
| 0
| 0.008355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.057143
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a2e7883e37f04872dfbca542a175e03562af762
| 56,050
|
py
|
Python
|
docker-images/taigav2/taiga-back/tests/integration/test_importer_api.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 1
|
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/tests/integration/test_importer_api.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
docker-images/taigav2/taiga-back/tests/integration/test_importer_api.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import base64
from django.apps import apps
from django.core.urlresolvers import reverse
from django.core.files.base import ContentFile
from taiga.base.utils import json
from taiga.export_import import services
from taiga.export_import.exceptions import TaigaImportError
from taiga.projects.models import Project, Membership
from taiga.projects.issues.models import Issue
from taiga.projects.userstories.models import UserStory
from taiga.projects.tasks.models import Task
from taiga.projects.wiki.models import WikiPage
from .. import factories as f
from ..utils import DUMMY_BMP_DATA
pytestmark = pytest.mark.django_db
#######################################################
## test api/v1/importer
#######################################################
def test_invalid_project_import(client):
    """POSTing an empty payload to the importer endpoint is rejected."""
    client.login(f.UserFactory.create())
    importer_url = reverse("importer-list")

    response = client.json.post(importer_url, json.dumps({}))

    assert response.status_code == 400
def test_valid_project_import_without_extra_data(client):
    """A minimal valid payload creates an otherwise-empty project."""
    owner = f.UserFactory.create()
    watcher = f.UserFactory.create(email="testing@taiga.io")
    client.login(owner)

    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"name": "Role"}],
        "watchers": ["testing@taiga.io"],
    }
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201, response.data

    # Nothing beyond the bare project should have been created.
    empty_keys = [
        "issues", "user_stories", "us_statuses", "wiki_pages", "priorities",
        "severities", "milestones", "points", "issue_types", "task_statuses",
        "issue_statuses", "wiki_links",
    ]
    assert all(len(response.data[key]) == 0 for key in empty_keys)
    assert response.data["owner"] == owner.email
    assert response.data["watchers"] == [owner.email, watcher.email]
def test_valid_project_without_enough_public_projects_slots(client):
    """Importing a public project fails when the owner has no public slots."""
    owner = f.UserFactory.create(max_public_projects=0)
    payload = {
        "slug": "public-project-without-slots",
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"name": "Role"}],
        "is_private": False,
    }

    client.login(owner)
    response = client.json.post(reverse("importer-list"), json.dumps(payload))

    assert response.status_code == 400
    assert "can't have more public projects" in response.data["_error_message"]
    assert Project.objects.filter(slug="public-project-without-slots").count() == 0
    assert response["Taiga-Info-Project-Memberships"] == "1"
    assert response["Taiga-Info-Project-Is-Private"] == "False"
def test_valid_project_without_enough_private_projects_slots(client):
    """A user with no private-project slots left cannot import a private project."""
    owner = f.UserFactory.create(max_private_projects=0)
    payload = {
        "slug": "private-project-without-slots",
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"name": "Role"}],
        "is_private": True,
    }
    client.login(owner)
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 400
    assert "can't have more private projects" in response.data["_error_message"]
    assert response["Taiga-Info-Project-Memberships"] == "1"
    assert response["Taiga-Info-Project-Is-Private"] == "True"
    # Nothing must be persisted on a quota failure.
    assert Project.objects.filter(slug="private-project-without-slots").count() == 0
def test_valid_project_with_enough_public_projects_slots(client):
    """A user with a free public-project slot can import a public project."""
    owner = f.UserFactory.create(max_public_projects=1)
    payload = {
        "slug": "public-project-with-slots",
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"name": "Role"}],
        "is_private": False,
    }
    client.login(owner)
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201
    assert Project.objects.filter(slug="public-project-with-slots").count() == 1
def test_valid_project_with_enough_private_projects_slots(client):
    """A user with a free private-project slot can import a private project."""
    owner = f.UserFactory.create(max_private_projects=1)
    payload = {
        "slug": "private-project-with-slots",
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"name": "Role"}],
        "is_private": True,
    }
    client.login(owner)
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201
    assert Project.objects.filter(slug="private-project-with-slots").count() == 1
def test_valid_project_import_with_not_existing_memberships(client):
    """Memberships referencing unknown emails are still accepted by the importer."""
    owner = f.UserFactory.create()
    client.login(owner)
    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "memberships": [{"email": "bad@email.com", "role": "Role"}],
        "roles": [{"name": "Role"}],
    }
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201
    # The imported membership plus the automatically created owner membership.
    assert len(response.data["memberships"]) == 2
def test_valid_project_import_with_membership_uuid_rewrite(client):
    """Invitation tokens coming from a dump must not be stored verbatim."""
    owner = f.UserFactory.create()
    client.login(owner)
    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "memberships": [{"email": "with-uuid@email.com", "role": "Role", "token": "123"}],
        "roles": [{"name": "Role"}],
    }
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201
    # The token from the payload must have been rewritten, so no membership
    # keeps the original "123" value.
    assert Membership.objects.filter(email="with-uuid@email.com", token="123").count() == 0
def test_valid_project_import_with_extra_data(client):
    """Importing project-level catalogs creates exactly one instance of each."""
    owner = f.UserFactory.create()
    client.login(owner)
    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"permissions": [], "name": "Test"}],
        "us_statuses": [{"name": "Test"}],
        "severities": [{"name": "Test"}],
        "priorities": [{"name": "Test"}],
        "points": [{"name": "Test"}],
        "issue_types": [{"name": "Test"}],
        "task_statuses": [{"name": "Test"}],
        "issue_statuses": [{"name": "Test"}],
    }
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 201

    empty_collections = (
        "issues", "user_stories", "wiki_pages", "milestones",
        "wiki_links",
    )
    single_instance_collections = (
        "roles", "us_statuses", "severities", "priorities", "points",
        "issue_types", "task_statuses", "issue_statuses", "memberships",
    )
    assert all(len(response.data[key]) == 0 for key in empty_collections)
    # The owner membership is always created, hence exactly one membership.
    assert all(len(response.data[key]) == 1 for key in single_instance_collections)
    assert response.data["owner"] == owner.email
def test_invalid_project_import_without_roles(client):
    """A project payload lacking roles fails validation and persists nothing."""
    client.login(f.UserFactory.create())
    payload = {
        "name": "Imported project",
        "description": "Imported project",
    }
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 2
    assert Project.objects.filter(slug="imported-project").count() == 0
def test_invalid_project_import_with_extra_data(client):
    """Empty catalog entries make the whole import fail atomically."""
    client.login(f.UserFactory.create())
    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"permissions": [], "name": "Test"}],
    }
    # Each catalog gets a single empty (and therefore invalid) entry.
    for catalog in ("us_statuses", "severities", "priorities", "points",
                    "issue_types", "task_statuses", "issue_statuses"):
        payload[catalog] = [{}]
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 400
    # One validation error per invalid catalog.
    assert len(response.data) == 7
    assert Project.objects.filter(slug="imported-project").count() == 0
def test_valid_project_import_with_custom_attributes(client):
    """Custom attribute definitions for user stories, tasks and issues are
    imported and returned, one instance each, while other children stay empty."""
    user = f.UserFactory.create()
    url = reverse("importer-list")
    data = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{
            "permissions": [],
            "name": "Test"
        }],
        "userstorycustomattributes": [{
            "name": "custom attribute example 1",
            "description": "short description 1",
            "order": 1
        }],
        "taskcustomattributes": [{
            "name": "custom attribute example 1",
            "description": "short description 1",
            "order": 1
        }],
        "issuecustomattributes": [{
            "name": "custom attribute example 1",
            "description": "short description 1",
            "order": 1
        }]
    }
    must_empty_children = ["issues", "user_stories", "wiki_pages", "milestones", "wiki_links"]
    must_one_instance_children = ["userstorycustomattributes", "taskcustomattributes", "issuecustomattributes"]
    client.login(user)
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert all(map(lambda x: len(response.data[x]) == 0, must_empty_children))
    # The owner membership is always created.
    assert all(map(lambda x: len(response.data[x]) == 1, must_one_instance_children))
    assert response.data["owner"] == user.email
def test_invalid_project_import_with_custom_attributes(client):
    """Empty custom-attribute definitions are rejected and nothing is stored."""
    member = f.UserFactory.create()
    payload = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{"permissions": [], "name": "Test"}],
        "userstorycustomattributes": [{}],
        "taskcustomattributes": [{}],
        "issuecustomattributes": [{}],
    }
    client.login(member)
    response = client.json.post(reverse("importer-list"), json.dumps(payload))
    assert response.status_code == 400
    # One validation error per invalid custom-attribute collection.
    assert len(response.data) == 3
    assert Project.objects.filter(slug="imported-project").count() == 0
#######################################################
## test api/v1/importer/milestone
#######################################################
def test_invalid_milestone_import(client):
    """An empty milestone payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-milestone", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_milestone_import(client):
    """A milestone with dates and watchers is created and watchers resolved."""
    owner = f.UserFactory.create()
    watcher = f.UserFactory.create(email="testing@taiga.io")
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    payload = {
        "name": "Imported milestone",
        "estimated_start": "2014-10-10",
        "estimated_finish": "2014-10-20",
        "watchers": ["testing@taiga.io"],
    }
    response = client.json.post(reverse("importer-milestone", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 201
    assert response.data["watchers"] == [watcher.email]
def test_milestone_import_duplicated_milestone(client):
    """Importing the same milestone twice reports a duplicated-name error."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-milestone", args=[project.pk])
    payload = json.dumps({
        "name": "Imported milestone",
        "estimated_start": "2014-10-10",
        "estimated_finish": "2014-10-20",
    })
    client.json.post(endpoint, payload)             # first import succeeds
    response = client.json.post(endpoint, payload)  # second one collides
    assert response.status_code == 400
    assert response.data["milestones"][0]["name"][0] == "Name duplicated for the project"
#######################################################
## test api/v1/importer/us
#######################################################
def test_invalid_us_import(client):
    """An empty user story payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-us", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_us_import_without_extra_data(client):
    """A minimal user story payload gets an owner and a project-scoped ref."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    response = client.json.post(reverse("importer-us", args=[project.pk]),
                                json.dumps({"subject": "Test"}))
    assert response.status_code == 201
    assert response.data["owner"] == owner.email
    assert response.data["ref"] is not None
def test_valid_us_import_with_extra_data(client):
    """A user story with a base64-encoded attachment and watchers is imported:
    the attachment is stored and the watcher email is resolved to a user."""
    user = f.UserFactory.create()
    user_watching = f.UserFactory.create(email="testing@taiga.io")
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    client.login(user)
    url = reverse("importer-us", args=[project.pk])
    data = {
        "subject": "Imported us",
        "description": "Imported us",
        "attachments": [{
            "owner": user.email,
            "attached_file": {
                "name": "imported attachment",
                # Attachment contents travel base64-encoded inside the JSON payload.
                "data": base64.b64encode(b"TEST").decode("utf-8")
            }
        }],
        "watchers": ["testing@taiga.io"]
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert len(response.data["attachments"]) == 1
    assert response.data["owner"] == user.email
    assert response.data["ref"] is not None
    assert response.data["watchers"] == [user_watching.email]
def test_invalid_us_import_with_extra_data(client):
    """An empty attachment entry invalidates the whole user story import."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported us",
        "description": "Imported us",
        "attachments": [{}],
    }
    response = client.json.post(reverse("importer-us", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
    assert UserStory.objects.filter(subject="Imported us").count() == 0
def test_invalid_us_import_with_bad_choices(client):
    """A status name unknown to the project makes the user story import fail."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported us",
        "description": "Imported us",
        "status": "Not valid",
    }
    response = client.json.post(reverse("importer-us", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
#######################################################
## test api/v1/importer/task
#######################################################
def test_invalid_task_import(client):
    """An empty task payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-task", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_task_import_without_extra_data(client):
    """A minimal task payload gets an owner and a project-scoped ref."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    response = client.json.post(reverse("importer-task", args=[project.pk]),
                                json.dumps({"subject": "Test"}))
    assert response.status_code == 201
    assert response.data["owner"] == owner.email
    assert response.data["ref"] is not None
def test_valid_task_import_with_custom_attributes_values(client):
    """Custom attribute values keyed by attribute *name* in the payload must be
    stored keyed by the attribute *id*."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    # The membership itself is only needed for permissions; no local reference kept.
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    project.save()
    custom_attr = f.TaskCustomAttributeFactory(project=project)
    url = reverse("importer-task", args=[project.pk])
    data = {
        "subject": "Test Custom Attrs Values Tasks",
        "custom_attributes_values": {
            custom_attr.name: "test_value"
        }
    }
    client.login(user)
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    custom_attributes_values = apps.get_model("custom_attributes.TaskCustomAttributesValues").objects.get(
        task__subject=response.data["subject"])
    # Name-to-id translation must have happened during import.
    assert custom_attributes_values.attributes_values == {str(custom_attr.id): "test_value"}
def test_valid_task_import_with_extra_data(client):
    """A task with a base64-encoded attachment and watchers is imported:
    the attachment is stored and the watcher email is resolved to a user."""
    user = f.UserFactory.create()
    user_watching = f.UserFactory.create(email="testing@taiga.io")
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    project.save()
    client.login(user)
    url = reverse("importer-task", args=[project.pk])
    data = {
        "subject": "Imported task",
        "description": "Imported task",
        "attachments": [{
            "owner": user.email,
            "attached_file": {
                "name": "imported attachment",
                # Attachment contents travel base64-encoded inside the JSON payload.
                "data": base64.b64encode(b"TEST").decode("utf-8")
            }
        }],
        "watchers": ["testing@taiga.io"]
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert len(response.data["attachments"]) == 1
    assert response.data["owner"] == user.email
    assert response.data["ref"] is not None
    assert response.data["watchers"] == [user_watching.email]
def test_invalid_task_import_with_extra_data(client):
    """An empty attachment entry invalidates the whole task import."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported task",
        "description": "Imported task",
        "attachments": [{}],
    }
    response = client.json.post(reverse("importer-task", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
    assert Task.objects.filter(subject="Imported task").count() == 0
def test_invalid_task_import_with_bad_choices(client):
    """A status name unknown to the project makes the task import fail."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported task",
        "description": "Imported task",
        "status": "Not valid",
    }
    response = client.json.post(reverse("importer-task", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
def test_valid_task_with_user_story(client):
    """A task referencing a user story by ref is attached to that story."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_task_status = f.TaskStatusFactory.create(project=project)
    story = f.UserStoryFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported task",
        "description": "Imported task",
        "user_story": story.ref,
    }
    response = client.json.post(reverse("importer-task", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 201
    assert story.tasks.all().count() == 1
#######################################################
## test api/v1/importer/issue
#######################################################
def test_invalid_issue_import(client):
    """An empty issue payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-issue", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_user_story_import(client):
    """A user story import preserves the subject and the finish_date.

    NOTE(review): despite living in the issue-importer section and using
    "Imported issue" as the subject, this test posts to the *user story*
    importer endpoint ("importer-us").
    """
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    client.login(user)
    url = reverse("importer-us", args=[project.pk])
    data = {
        "subject": "Imported issue",
        "finish_date": "2014-10-24T00:00:00+0000"
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert response.data["subject"] == "Imported issue"
    # The finish_date must round-trip unchanged through the importer.
    assert response.data["finish_date"] == "2014-10-24T00:00:00+0000"
def test_valid_user_story_import_with_custom_attributes_values(client):
    """Custom attribute values keyed by attribute *name* in the payload must be
    stored keyed by the attribute *id*."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    # The membership itself is only needed for permissions; no local reference kept.
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_us_status = f.UserStoryStatusFactory.create(project=project)
    project.save()
    custom_attr = f.UserStoryCustomAttributeFactory(project=project)
    url = reverse("importer-us", args=[project.pk])
    data = {
        "subject": "Test Custom Attrs Values User Story",
        "custom_attributes_values": {
            custom_attr.name: "test_value"
        }
    }
    client.login(user)
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    custom_attributes_values = apps.get_model("custom_attributes.UserStoryCustomAttributesValues").objects.get(
        user_story__subject=response.data["subject"])
    # Name-to-id translation must have happened during import.
    assert custom_attributes_values.attributes_values == {str(custom_attr.id): "test_value"}
def test_valid_issue_import_without_extra_data(client):
    """A minimal issue payload gets an owner and a project-scoped ref."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    # Issues need every default choice field configured on the project.
    project.default_issue_type = f.IssueTypeFactory.create(project=project)
    project.default_issue_status = f.IssueStatusFactory.create(project=project)
    project.default_severity = f.SeverityFactory.create(project=project)
    project.default_priority = f.PriorityFactory.create(project=project)
    project.save()
    client.login(owner)
    response = client.json.post(reverse("importer-issue", args=[project.pk]),
                                json.dumps({"subject": "Test"}))
    assert response.status_code == 201
    assert response.data["owner"] == owner.email
    assert response.data["ref"] is not None
def test_valid_issue_import_with_custom_attributes_values(client):
    """Custom attribute values keyed by attribute *name* in the payload must be
    stored keyed by the attribute *id*."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    # The membership itself is only needed for permissions; no local reference kept.
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_issue_type = f.IssueTypeFactory.create(project=project)
    project.default_issue_status = f.IssueStatusFactory.create(project=project)
    project.default_severity = f.SeverityFactory.create(project=project)
    project.default_priority = f.PriorityFactory.create(project=project)
    project.save()
    custom_attr = f.IssueCustomAttributeFactory(project=project)
    url = reverse("importer-issue", args=[project.pk])
    data = {
        "subject": "Test Custom Attrs Values Issues",
        "custom_attributes_values": {
            custom_attr.name: "test_value"
        }
    }
    client.login(user)
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    custom_attributes_values = apps.get_model("custom_attributes.IssueCustomAttributesValues").objects.get(
        issue__subject=response.data["subject"])
    # Name-to-id translation must have happened during import.
    assert custom_attributes_values.attributes_values == {str(custom_attr.id): "test_value"}
def test_valid_issue_import_with_extra_data(client):
    """An issue with a finished_date, a base64-encoded attachment and watchers
    is imported with all three preserved in the response."""
    user = f.UserFactory.create()
    user_watching = f.UserFactory.create(email="testing@taiga.io")
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_issue_type = f.IssueTypeFactory.create(project=project)
    project.default_issue_status = f.IssueStatusFactory.create(project=project)
    project.default_severity = f.SeverityFactory.create(project=project)
    project.default_priority = f.PriorityFactory.create(project=project)
    project.save()
    client.login(user)
    url = reverse("importer-issue", args=[project.pk])
    data = {
        "subject": "Imported issue",
        "description": "Imported issue",
        "finished_date": "2014-10-24T00:00:00+0000",
        "attachments": [{
            "owner": user.email,
            "attached_file": {
                "name": "imported attachment",
                # Attachment contents travel base64-encoded inside the JSON payload.
                "data": base64.b64encode(b"TEST").decode("utf-8")
            }
        }],
        "watchers": ["testing@taiga.io"]
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert len(response.data["attachments"]) == 1
    assert response.data["owner"] == user.email
    assert response.data["ref"] is not None
    # The finished_date must round-trip unchanged through the importer.
    assert response.data["finished_date"] == "2014-10-24T00:00:00+0000"
    assert response.data["watchers"] == [user_watching.email]
def test_invalid_issue_import_with_extra_data(client):
    """An empty attachment entry invalidates the whole issue import."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    project.default_issue_type = f.IssueTypeFactory.create(project=project)
    project.default_issue_status = f.IssueStatusFactory.create(project=project)
    project.default_severity = f.SeverityFactory.create(project=project)
    project.default_priority = f.PriorityFactory.create(project=project)
    project.save()
    client.login(owner)
    payload = {
        "subject": "Imported issue",
        "description": "Imported issue",
        "attachments": [{}],
    }
    response = client.json.post(reverse("importer-issue", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
    assert Issue.objects.filter(subject="Imported issue").count() == 0
def test_invalid_issue_import_with_bad_choices(client):
    """For each issue choice field (status, priority, severity, type), a value
    unknown to the project must fail with exactly one validation error."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    project.default_issue_type = f.IssueTypeFactory.create(project=project)
    project.default_issue_status = f.IssueStatusFactory.create(project=project)
    project.default_severity = f.SeverityFactory.create(project=project)
    project.default_priority = f.PriorityFactory.create(project=project)
    project.save()
    client.login(user)
    url = reverse("importer-issue", args=[project.pk])
    # The original test repeated the same request/assert sequence four times;
    # iterating over the choice fields keeps the exact same checks.
    for field in ("status", "priority", "severity", "type"):
        data = {
            "subject": "Imported issue",
            "description": "Imported issue",
            field: "Not valid"
        }
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 400
        assert len(response.data) == 1
#######################################################
## test api/v1/importer/wiki-page
#######################################################
def test_invalid_wiki_page_import(client):
    """An empty wiki page payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-wiki-page", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_wiki_page_import_without_extra_data(client):
    """A wiki page payload with only a slug is created and owned by the importer."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    payload = {"slug": "imported-wiki-page"}
    response = client.json.post(reverse("importer-wiki-page", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 201
    assert response.data["owner"] == owner.email
def test_valid_wiki_page_import_with_extra_data(client):
    """A wiki page with a base64-encoded attachment and watchers is imported:
    the attachment is stored and the watcher email is resolved to a user."""
    user = f.UserFactory.create()
    user_watching = f.UserFactory.create(email="testing@taiga.io")
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("importer-wiki-page", args=[project.pk])
    data = {
        "slug": "imported-wiki-page",
        "content": "Imported wiki_page",
        "attachments": [{
            "owner": user.email,
            "attached_file": {
                "name": "imported attachment",
                # Attachment contents travel base64-encoded inside the JSON payload.
                "data": base64.b64encode(b"TEST").decode("utf-8")
            }
        }],
        "watchers": ["testing@taiga.io"]
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert len(response.data["attachments"]) == 1
    assert response.data["owner"] == user.email
    assert response.data["watchers"] == [user_watching.email]
def test_invalid_wiki_page_import_with_extra_data(client):
    """An empty attachment entry invalidates the whole wiki page import."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    payload = {
        "slug": "imported-wiki-page",
        "content": "Imported wiki_page",
        "attachments": [{}],
    }
    response = client.json.post(reverse("importer-wiki-page", args=[project.pk]),
                                json.dumps(payload))
    assert response.status_code == 400
    assert len(response.data) == 1
    assert WikiPage.objects.filter(slug="imported-wiki-page").count() == 0
#######################################################
## test api/v1/importer/wiki-link
#######################################################
def test_invalid_wiki_link_import(client):
    """An empty wiki link payload is rejected with HTTP 400."""
    owner = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory(project=project, user=owner, is_admin=True)
    client.login(owner)
    endpoint = reverse("importer-wiki-link", args=[project.pk])
    response = client.json.post(endpoint, json.dumps({}))
    assert response.status_code == 400
def test_valid_wiki_link_import(client):
    """A wiki link with a title and href is created successfully."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("importer-wiki-link", args=[project.pk])
    data = {
        "title": "Imported wiki_link",
        "href": "imported-wiki-link",
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    # Removed a dangling `response.data` expression statement that had no effect.
##################################################################
## test taiga.export_import.services.store_project_from_dict
##################################################################
def test_services_store_project_from_dict_with_no_projects_slots_available(client):
    """store_project_from_dict raises TaigaImportError when the owner has no
    private-project slots left."""
    user = f.UserFactory.create(max_private_projects=0)
    data = {
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True
    }
    with pytest.raises(TaigaImportError) as excinfo:
        # Return value intentionally discarded: the call must raise.
        services.store_project_from_dict(data, owner=user)
    assert "can't have more private projects" in str(excinfo.value)
def test_services_store_project_from_dict_with_no_members_private_project_slots_available(client):
    """store_project_from_dict raises TaigaImportError when the dump carries
    more memberships than the owner's private-project membership limit."""
    user = f.UserFactory.create(max_memberships_private_projects=2)
    data = {
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True,
        "roles": [{"name": "Role"}],
        # Four invitations against a two-membership limit.
        "memberships": [{"email": "test%d@test.com" % i, "role": "Role"}
                        for i in range(1, 5)],
    }
    with pytest.raises(TaigaImportError) as excinfo:
        # Return value intentionally discarded: the call must raise.
        services.store_project_from_dict(data, owner=user)
    assert "reaches your current limit of memberships for private" in str(excinfo.value)
def test_services_store_project_from_dict_with_no_members_public_project_slots_available(client):
    """store_project_from_dict raises TaigaImportError when the dump carries
    more memberships than the owner's public-project membership limit."""
    user = f.UserFactory.create(max_memberships_public_projects=2)
    data = {
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "roles": [{"name": "Role"}],
        # Four invitations against a two-membership limit.
        "memberships": [{"email": "test%d@test.com" % i, "role": "Role"}
                        for i in range(1, 5)],
    }
    with pytest.raises(TaigaImportError) as excinfo:
        # Return value intentionally discarded: the call must raise.
        services.store_project_from_dict(data, owner=user)
    assert "reaches your current limit of memberships for public" in str(excinfo.value)
def test_services_store_project_from_dict_with_issue_priorities_names_as_None(client):
    """A priority literally named "None" must survive the import round-trip."""
    owner = f.UserFactory.create()
    data = {
        "name": "Imported project",
        "description": "Imported project",
        "issue_types": [{"name": "Bug"}],
        "issue_statuses": [{"name": "New"}],
        "priorities": [{"name": "None", "order": 5, "color": "#CC0000"}],
        "severities": [{"name": "Normal", "order": 5, "color": "#CC0000"}],
        "issues": [{
            "status": "New",
            "priority": "None",
            "severity": "Normal",
            "type": "Bug",
            "subject": "Test",
        }],
    }
    project = services.store_project_from_dict(data, owner=owner)
    # The string "None" must be treated as a priority name, not a null value.
    assert project.issues.first().priority.name == "None"
##################################################################
## test api/v1/importer/load-dump
##################################################################
def test_invalid_dump_import(client):
    """A dump file that is not valid JSON is rejected with a format error."""
    client.login(f.UserFactory.create())
    dump_file = ContentFile(b"test")
    dump_file.name = "test"
    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})
    assert response.status_code == 400
    assert response.data["_error_message"] == "Invalid dump format"
def test_valid_dump_import_without_enough_public_projects_slots(client, settings):
    """Loading a public-project dump fails when no public slots remain."""
    settings.CELERY_ENABLED = False  # run the import synchronously
    user = f.UserFactory.create(max_public_projects=0)
    client.login(user)
    dump_file = ContentFile(json.dumps({
        "slug": "public-project-without-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
    }).encode("utf-8"))
    dump_file.name = "test"
    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})
    assert response.status_code == 400
    assert "can't have more public projects" in response.data["_error_message"]
    assert response["Taiga-Info-Project-Memberships"] == "1"
    assert response["Taiga-Info-Project-Is-Private"] == "False"
    assert Project.objects.filter(slug="public-project-without-slots").count() == 0
def test_valid_dump_import_without_enough_private_projects_slots(client, settings):
    """Loading a private-project dump fails when no private slots remain."""
    settings.CELERY_ENABLED = False  # run the import synchronously
    user = f.UserFactory.create(max_private_projects=0)
    client.login(user)
    dump_file = ContentFile(json.dumps({
        "slug": "private-project-without-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True,
    }).encode("utf-8"))
    dump_file.name = "test"
    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})
    assert response.status_code == 400
    assert "can't have more private projects" in response.data["_error_message"]
    assert response["Taiga-Info-Project-Memberships"] == "1"
    assert response["Taiga-Info-Project-Is-Private"] == "True"
    assert Project.objects.filter(slug="private-project-without-slots").count() == 0
def test_valid_dump_import_without_enough_membership_private_project_slots_one_project(client, settings):
    """A private-project dump with 6 memberships must fail for a user whose
    private-membership quota is 5, and nothing must be imported."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_private_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "project-without-memberships-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True,
        "memberships": [
            {
                "email": "test1@test.com",
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
            {
                "email": "test6@test.com",
                "role": "Role",
            },
        ],
        "roles": [{"name": "Role"}]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 400
    assert "reaches your current limit of memberships for private" in response.data["_error_message"]
    assert Project.objects.filter(slug="project-without-memberships-slots").count() == 0
def test_valid_dump_import_without_enough_membership_public_project_slots_one_project(client, settings):
    """A public-project dump with 6 memberships must fail for a user whose
    public-membership quota is 5, and nothing must be imported."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_public_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "project-without-memberships-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "memberships": [
            {
                "email": "test1@test.com",
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
            {
                "email": "test6@test.com",
                "role": "Role",
            },
        ],
        "roles": [{"name": "Role"}]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 400
    assert "reaches your current limit of memberships for public" in response.data["_error_message"]
    assert Project.objects.filter(slug="project-without-memberships-slots").count() == 0
def test_valid_dump_import_with_enough_membership_private_project_slots_multiple_projects(client, settings):
    """With a private-membership quota of 10, a dump with 6 memberships imports
    fine (201) even though the user already owns a project with 5 members."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_private_projects=10)
    # Pre-existing project owned by the same user, with five members of its own.
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "project-without-memberships-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True,
        "roles": [{"name": "Role"}],
        "memberships": [
            {
                "email": "test1@test.com",
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
            {
                "email": "test6@test.com",
                "role": "Role",
            }
        ]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert "id" in response.data
    assert response.data["name"] == "Valid project"
def test_valid_dump_import_with_enough_membership_public_project_slots_multiple_projects(client, settings):
    """With a public-membership quota of 10, a dump with 6 memberships imports
    fine (201) even though the user already owns a project with 5 members."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_public_projects=10)
    # Pre-existing project owned by the same user, with five members of its own.
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    f.MembershipFactory.create(project=project)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "project-without-memberships-slots",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "roles": [{"name": "Role"}],
        "memberships": [
            {
                "email": "test1@test.com",
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
            {
                "email": "test6@test.com",
                "role": "Role",
            }
        ]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert "id" in response.data
    assert response.data["name"] == "Valid project"
def test_valid_dump_import_with_the_limit_of_membership_whit_you_for_private_project(client, settings):
    """A private-project dump with exactly 5 memberships, one of which is the
    importing user, succeeds under a private-membership quota of 5."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_private_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "private-project-with-memberships-limit-with-you",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True,
        "memberships": [
            {
                # The importing user itself is one of the five members.
                "email": user.email,
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
        ],
        "roles": [{"name": "Role"}]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert Project.objects.filter(slug="private-project-with-memberships-limit-with-you").count() == 1
def test_valid_dump_import_with_the_limit_of_membership_whit_you_for_public_project(client, settings):
    """A public-project dump with exactly 5 memberships, one of which is the
    importing user, succeeds under a public-membership quota of 5."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_public_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "public-project-with-memberships-limit-with-you",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "memberships": [
            {
                # The importing user itself is one of the five members.
                "email": user.email,
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
        ],
        "roles": [{"name": "Role"}]
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert Project.objects.filter(slug="public-project-with-memberships-limit-with-you").count() == 1
def test_valid_dump_import_with_celery_disabled(client, settings):
    """With Celery off the import runs synchronously and answers 201 with the project data."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create()
    client.login(user)

    payload = {
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True
    }
    dump_file = ContentFile(bytes(json.dumps(payload), "utf-8"))
    dump_file.name = "test"

    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})

    assert response.status_code == 201
    assert "id" in response.data
    assert response.data["name"] == "Valid project"
def test_invalid_dump_import_with_celery_disabled(client, settings):
    """With Celery off, an invalid dump (memberships reference a role but the
    dump defines no "roles" section) fails synchronously with a 400."""
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create(max_memberships_public_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "invalid-project",
        "name": "Invalid project",
        "description": "Valid project desc",
        "is_private": False,
        "memberships": [
            {
                "email": user.email,
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
        ],
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 400
def test_valid_dump_import_with_celery_enabled(client, settings):
    """With Celery on the import is deferred: the API answers 202 with an
    import id and the project is created by the (eagerly run) task."""
    settings.CELERY_ENABLED = True
    user = f.UserFactory.create()
    client.login(user)

    payload = {
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": True
    }
    dump_file = ContentFile(bytes(json.dumps(payload), "utf-8"))
    dump_file.name = "test"

    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})

    assert response.status_code == 202
    assert "import_id" in response.data
    assert Project.objects.filter(slug="valid-project").count() == 1
def test_invalid_dump_import_with_celery_enabled(client, settings):
    """With Celery on, an invalid dump is still accepted with 202 (validation
    happens in the task), but the project must never be created."""
    settings.CELERY_ENABLED = True
    user = f.UserFactory.create(max_memberships_public_projects=5)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "invalid-project",
        "name": "Invalid project",
        "description": "Valid project desc",
        "is_private": False,
        "memberships": [
            {
                "email": user.email,
                "role": "Role",
            },
            {
                "email": "test2@test.com",
                "role": "Role",
            },
            {
                "email": "test3@test.com",
                "role": "Role",
            },
            {
                "email": "test4@test.com",
                "role": "Role",
            },
            {
                "email": "test5@test.com",
                "role": "Role",
            },
        ],
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 202
    assert "import_id" in response.data
    # The task ran but the dump was invalid, so no project exists.
    assert Project.objects.filter(slug="invalid-project").count() == 0
def test_dump_import_throttling(client, settings):
    """With the import throttle set to 1/minute, the second consecutive
    dump upload must be rejected with 429 Too Many Requests."""
    settings.REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"]["import-dump-mode"] = "1/minute"
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": project.slug,
        "name": "Test import",
        "description": "Valid project desc",
        "is_private": True
    }), "utf-8"))
    data.name = "test"
    # First request consumes the single slot of the rate window.
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    # Second request inside the same window is throttled.
    response = client.post(url, {'dump': data})
    assert response.status_code == 429
def test_valid_dump_import_without_slug(client):
    """A dump without a slug is accepted; the importer derives one itself."""
    # A project with a different slug already exists in the database.
    f.ProjectFactory.create(slug="existing-slug")
    logged_user = f.UserFactory.create()
    client.login(logged_user)

    payload = {
        "name": "Project name",
        "description": "Valid project desc",
        "is_private": True
    }
    dump_file = ContentFile(bytes(json.dumps(payload), "utf-8"))
    dump_file.name = "test"

    response = client.post(reverse("importer-load-dump"), {'dump': dump_file})

    assert response.status_code == 201
def test_valid_dump_import_with_logo(client, settings):
    """Importing a dump that embeds a base64-encoded logo creates the project
    and exposes non-null logo thumbnail URLs in the response."""
    user = f.UserFactory.create()
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "logo": {
            "name": "logo.bmp",
            "data": base64.b64encode(DUMMY_BMP_DATA).decode("utf-8")
        }
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert "id" in response.data
    assert response.data["name"] == "Valid project"
    # Both thumbnail URLs must be present and populated.
    # (PEP 8: compare to None with `is not`, not `!=`.)
    assert "logo_small_url" in response.data
    assert response.data["logo_small_url"] is not None
    assert "logo_big_url" in response.data
    assert response.data["logo_big_url"] is not None
def test_valid_project_import_and_disabled_is_featured(client):
    """Even if the imported payload sets ``is_featured: true``, the created
    project must come out not featured."""
    user = f.UserFactory.create()
    client.login(user)
    url = reverse("importer-list")
    data = {
        "name": "Imported project",
        "description": "Imported project",
        "roles": [{
            "permissions": [],
            "name": "Test"
        }],
        "is_featured": True
    }
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 201
    assert response.data["owner"] == user.email
    # PEP 8: identity comparison for booleans instead of `== False`.
    assert response.data["is_featured"] is False
def test_dump_import_duplicated_project(client):
    """Importing a dump whose slug collides with an existing project succeeds
    and gets a fresh slug derived from the username and the project name."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    client.login(user)
    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": project.slug,
        "name": "Test import",
        "description": "Valid project desc",
        "is_private": True
    }), "utf-8"))
    data.name = "test"
    response = client.post(url, {'dump': data})
    assert response.status_code == 201
    assert response.data["name"] == "Test import"
    # Deduplicated slug: "<username>-test-import".
    assert response.data["slug"] == "{}-test-import".format(user.username)
| 32.492754
| 111
| 0.606869
| 6,121
| 56,050
| 5.422643
| 0.05473
| 0.046397
| 0.037961
| 0.04242
| 0.907146
| 0.894704
| 0.88208
| 0.874759
| 0.858279
| 0.847674
| 0
| 0.012256
| 0.237181
| 56,050
| 1,724
| 112
| 32.511601
| 0.764057
| 0.02405
| 0
| 0.735573
| 0
| 0
| 0.192866
| 0.026404
| 0
| 0
| 0
| 0
| 0.127831
| 1
| 0.046749
| false
| 0
| 0.159971
| 0
| 0.20672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a397d55bb058c5822dacffe59b136fda35d959b
| 25,839
|
py
|
Python
|
bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/string_funcs.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/string_funcs.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/string_funcs.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
import hashlib
from parserT28.models.instructions.Expression.expression import Expression, PrimitiveData, DATA_TYPE, Identifiers
from parserT28.controllers.error_controller import ErrorController
from parserT28.controllers.three_address_code import ThreeAddressCode
# TODO: REVISAR QUE NO MUERA CON DECODE, UNCODE, GETBYTE, SETBYTE, CONVERT
class Length(Expression):
    '''
    LENGTH(string): number of characters of the operand.

    When the operand is an Identifiers node (a table column) the result is a
    ``[values, alias]`` pair with one length per row; otherwise a single
    PrimitiveData number is returned.
    '''

    def __init__(self, value, line, column):
        self.value = value
        self.alias = f'LENGTH({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias

    def __repr__(self):
        return str(vars(self))

    def process(self, environment):
        '''Evaluate LENGTH over a column or a scalar string.'''
        try:
            if isinstance(self.value, Identifiers):
                processed = self.value.process(environment)
                lengths = [len(column) for column in processed[0]]
                return [lengths, self.alias]
            scalar = self.value.process(environment).value
            return PrimitiveData(DATA_TYPE.NUMBER, len(scalar), self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Length"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)

    def compile(self, environment):
        '''Emit three-address code computing len() of the operand.'''
        try:
            operand_temp = ThreeAddressCode().newTemp()
            operand = self.value.compile(environment).value
            # Operands named t<number> are temporaries and are copied as-is;
            # anything else is embedded as a quoted string literal.
            if operand[0] == 't' and operand[1:].isnumeric():
                ThreeAddressCode().addCode(f"{operand_temp} = {operand}")
            else:
                ThreeAddressCode().addCode(f"{operand_temp} = '{operand}'")
            result_temp = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(f"{result_temp} = len({operand_temp})")
            # NOTE(review): the original tags this temp as STRING even though it
            # holds a length; kept unchanged to preserve behavior.
            return PrimitiveData(DATA_TYPE.STRING, result_temp, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class Substring(Expression):
    '''
    SUBSTRING(string, down, up): slice of the operand between the evaluated
    bounds ``down`` and ``up`` (Python slice semantics, ``cadena[i:j]``).

    Column operands (Identifiers) yield a [values, alias] pair with one
    slice per row; scalar operands yield a PrimitiveData string.
    '''
    def __init__(self, value, down, up, line, column):
        self.value = value
        self.alias = f'SUBSTRING({self.value.alias})'
        self.up = up
        self.down = down
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        # Evaluate the slice bounds first; they apply to both branches.
        try:
            val = None
            result = 0
            lista1 = []
            i = self.down.process(environment).value
            j = self.up.process(environment).value
            if isinstance(self.value, Identifiers):
                val = self.value.process(environment)
                result = [columns[i:j] for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                substr = cadena[i:j]
                return PrimitiveData(DATA_TYPE.STRING, substr, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Substring"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        # Emits three-address code slicing the operand: temp[i:j].
        try:
            i = self.down.compile(environment).value
            j = self.up.compile(environment).value
            temp = ThreeAddressCode().newTemp()
            val = self.value.compile(environment).value
            dataTemp = f"{temp} = '{val}'"
            cambio = False
            if val[0] == 't':
                sub = val[1:]
                if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                    dataTemp = f"{temp} = {val}"
                    cambio = True
            if cambio is False:
                dataTemp = f"{temp} = '{val}'"
            tempi = ThreeAddressCode().newTemp()
            tempj = ThreeAddressCode().newTemp()
            dataTempi = f"{tempi} = {i}"
            dataTempj = f"{tempj} = {j}"
            ThreeAddressCode().addCode(dataTemp)
            ThreeAddressCode().addCode(dataTempi)
            ThreeAddressCode().addCode(dataTempj)
            temporal = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(f"{temporal} = {temp}[{tempi}:{tempj}]")
            return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class Substr(Expression):
    '''
    SUBSTR(string, down, up): alias of SUBSTRING with the same semantics.

    NOTE(review): this class duplicates Substring almost verbatim (only the
    alias prefix differs); consider subclassing Substring instead.
    '''
    def __init__(self, value, down, up, line, column):
        self.value = value
        self.alias = f'SUBSTR({self.value.alias})'
        self.up = up
        self.down = down
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        # Evaluate the slice bounds first; they apply to both branches.
        try:
            val = None
            result = 0
            lista1 = []
            i = self.down.process(environment).value
            j = self.up.process(environment).value
            if isinstance(self.value, Identifiers):
                val = self.value.process(environment)
                result = [columns[i:j] for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                substr = cadena[i:j]
                return PrimitiveData(DATA_TYPE.STRING, substr, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Substring"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        # Emits three-address code slicing the operand: temp[i:j].
        try:
            i = self.down.compile(environment).value
            j = self.up.compile(environment).value
            temp = ThreeAddressCode().newTemp()
            val = self.value.compile(environment).value
            dataTemp = f"{temp} = '{val}'"
            cambio = False
            if val[0] == 't':
                sub = val[1:]
                if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                    dataTemp = f"{temp} = {val}"
                    cambio = True
            if cambio is False:
                dataTemp = f"{temp} = '{val}'"
            tempi = ThreeAddressCode().newTemp()
            tempj = ThreeAddressCode().newTemp()
            dataTempi = f"{tempi} = {i}"
            dataTempj = f"{tempj} = {j}"
            ThreeAddressCode().addCode(dataTemp)
            ThreeAddressCode().addCode(dataTempi)
            ThreeAddressCode().addCode(dataTempj)
            temporal = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(f"{temporal} = {temp}[{tempi}:{tempj}]")
            return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class Trim(Expression):
    '''
    TRIM(string): operand with leading and trailing whitespace removed.

    Column operands (Identifiers) yield a [values, alias] pair with one
    stripped value per row; scalar operands yield a PrimitiveData string.
    '''

    def __init__(self, value, line, column):
        self.value = value
        self.alias = f'TRIM({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias

    def __repr__(self):
        return str(vars(self))

    def process(self, environment):
        '''Evaluate TRIM over a column or a scalar string.'''
        try:
            if isinstance(self.value, Identifiers):
                processed = self.value.process(environment)
                stripped = [column.strip() for column in processed[0]]
                return [stripped, self.alias]
            scalar = self.value.process(environment).value
            return PrimitiveData(DATA_TYPE.STRING, scalar.strip(), self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Trim"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)

    def compile(self, environment):
        '''Emit three-address code applying .strip() to the operand.'''
        try:
            operand_temp = ThreeAddressCode().newTemp()
            operand = self.value.compile(environment).value
            # Operands named t<number> are temporaries and are copied as-is;
            # anything else is embedded as a quoted string literal.
            if operand[0] == 't' and operand[1:].isnumeric():
                ThreeAddressCode().addCode(f"{operand_temp} = {operand}")
            else:
                ThreeAddressCode().addCode(f"{operand_temp} = '{operand}'")
            result_temp = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(f"{result_temp} = {operand_temp}.strip()")
            return PrimitiveData(DATA_TYPE.STRING, result_temp, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class MD5(Expression):
    '''
    MD5(string): hexadecimal MD5 digest of the operand.

    Column operands (Identifiers) yield a [values, alias] pair with one
    digest per row; scalar operands yield a PrimitiveData string.
    '''
    def __init__(self, value, line, column):
        self.value = value
        self.alias = f'MD5({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                result = [hashlib.md5(columns.encode()).hexdigest()
                          for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                result = hashlib.md5(cadena.encode())
                return PrimitiveData(DATA_TYPE.STRING, result.hexdigest(), self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para md5"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        # Emits three-address code computing md5(temp.encode()).hexdigest().
        try:
            temp = ThreeAddressCode().newTemp()
            val = self.value.compile(environment).value
            dataTemp = f"{temp} = '{val}'"
            cambio = False
            if val[0] == 't':
                sub = val[1:]
                if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                    dataTemp = f"{temp} = {val}"
                    cambio = True
            if cambio is False:
                dataTemp = f"{temp} = '{val}'"
            ThreeAddressCode().addCode(dataTemp)
            temporal = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(
                f"{temporal} = md5({temp}.encode()).hexdigest()")
            return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class SHA256(Expression):
    '''
    SHA256(string): hexadecimal SHA-256 digest of the operand.

    Column operands (Identifiers) yield a [values, alias] pair with one
    digest per row; scalar operands yield a PrimitiveData string.
    '''
    def __init__(self, value, line, column):
        self.value = value
        self.alias = f'SHA256({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                result = [hashlib.sha256(columns.encode()).hexdigest()
                          for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                result = hashlib.sha256(cadena.encode())
                return PrimitiveData(DATA_TYPE.STRING, result.hexdigest(), self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para sha256"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        # Emits three-address code computing sha256(temp.encode()).hexdigest().
        try:
            temp = ThreeAddressCode().newTemp()
            val = self.value.compile(environment).value
            dataTemp = f"{temp} = '{val}'"
            cambio = False
            if val[0] == 't':
                sub = val[1:]
                if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                    dataTemp = f"{temp} = {val}"
                    cambio = True
            if cambio is False:
                dataTemp = f"{temp} = '{val}'"
            ThreeAddressCode().addCode(dataTemp)
            temporal = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(
                f"{temporal} = sha256({temp}.encode()).hexdigest()")
            return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class GetByte(Expression):
    '''
    GETBYTE(string, pos): code point (ord) of the character at index *pos*.

    Column operands (Identifiers) yield a [values, alias] pair with one
    code point per row; scalar operands yield a single PrimitiveData.
    '''
    def __init__(self, value, pos, line, column):
        self.value = value
        self.pos = pos
        self.alias = f'GETBYTE({self.value.alias})'
        self.line = line
        self.column = column
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            index = self.pos.process(environment).value
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                result = [ord(columns[index]) for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                result = ord(cadena[index])
                # NOTE(review): the result is an int but is tagged as STRING;
                # confirm whether DATA_TYPE.NUMBER was intended.
                return PrimitiveData(DATA_TYPE.STRING, result, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para GetByte"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        # Emits three-address code computing ord(temp[tempPos]).
        try:
            temp = ThreeAddressCode().newTemp()
            val = self.value.compile(environment).value
            tempPos = ThreeAddressCode().newTemp()
            index = self.pos.compile(environment).value
            dataTemp = f"{temp} = '{val}'"
            cambio = False
            if val[0] == 't':
                sub = val[1:]
                if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                    dataTemp = f"{temp} = {val}"
                    cambio = True
            if cambio is False:
                dataTemp = f"{temp} = '{val}'"
            dataPos = f'{tempPos} = {index}'
            ThreeAddressCode().addCode(dataTemp)
            ThreeAddressCode().addCode(dataPos)
            temporal = ThreeAddressCode().newTemp()
            ThreeAddressCode().addCode(f"{temporal} = ord({temp}[{tempPos}])")
            return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class SetByte(Expression):
    '''
    SETBYTE(string, pos, no_char): copy of the operand with the character at
    index *pos* replaced by chr(no_char).

    Column operands (Identifiers) yield a [values, alias] pair with one
    modified string per row; scalar operands yield a PrimitiveData string.
    '''
    def __init__(self, value, pos, no_char, line, column):
        self.value = value
        self.pos = pos
        self.no_char = no_char
        self.alias = f'SETBYTE({self.value.alias})'
        self.line = line
        self.column = column
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            index = self.pos.process(environment).value
            char = self.no_char.process(environment).value
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                # Splice chr(char) into each row at position `index`.
                result = [(columns[:index] + chr(char) + columns[index + 1:])
                          for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                result = cadena[:index] + chr(char) + cadena[index + 1:]
                return PrimitiveData(DATA_TYPE.STRING, result, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para SetByte"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self):
        # Three-address code generation not implemented; defers to the base class.
        return super().compile()
class Convert(Expression):
    '''
    CONVERT(value, data_type): cast the operand to the requested type.

    Only "integer" (case-insensitive) triggers an int() conversion; every
    other data_type falls through to a str() conversion. Column operands
    (Identifiers) yield a [values, alias] pair; scalars a PrimitiveData.
    '''
    def __init__(self, value, data_type, line, column):
        self.value = value
        self.data_type = data_type
        self.alias = f'CONVERT({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            if self.data_type.lower() == "integer":
                if isinstance(self.value, Identifiers):
                    lista1 = []
                    val = self.value.process(environment)
                    result = [int(columns) for columns in val[0]]
                    lista1.append(result)
                    lista1.append(self.alias)
                    return lista1
                else:
                    cadena = self.value.process(environment).value
                    return PrimitiveData(DATA_TYPE.NUMBER, int(cadena), self.line, self.column)
            else:
                # Any non-integer target type: convert to string.
                if isinstance(self.value, Identifiers):
                    lista1 = []
                    val = self.value.process(environment)
                    result = [str(columns) for columns in val[0]]
                    lista1.append(result)
                    lista1.append(self.alias)
                    return lista1
                else:
                    cadena = self.value.process(environment).value
                    return PrimitiveData(DATA_TYPE.STRING, cadena, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Convert"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self, environment):
        try:
            if self.data_type.lower() == 'integer':
                temp = ThreeAddressCode().newTemp()
                val = self.value.compile(environment).value
                dataTemp = f"{temp} = '{val}'"
                cambio = False
                if val[0] == 't':
                    sub = val[1:]
                    if sub.isnumeric():  # it is a temporary (t<number>), copy it directly
                        dataTemp = f"{temp} = {val}"
                        cambio = True
                if cambio is False:
                    dataTemp = f"{temp} = '{val}'"
                ThreeAddressCode().addCode(dataTemp)
                temporal = ThreeAddressCode().newTemp()
                ThreeAddressCode().addCode(
                    f"{temporal} = int({temp})")
                return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
            else:
                # NOTE(review): unlike the integer branch, this path never
                # checks for a t<number> temporary and always quotes the
                # operand — confirm whether that is intentional.
                temp = ThreeAddressCode().newTemp()
                val = self.value.compile(environment).value
                dataTemp = f"{temp} = '{val}'"
                ThreeAddressCode().addCode(dataTemp)
                temporal = ThreeAddressCode().newTemp()
                ThreeAddressCode().addCode(
                    f"{temporal} = {temp}")
                return PrimitiveData(DATA_TYPE.STRING, temporal, self.line, self.column)
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
class Encode(Expression):
    '''
    ENCODE(value, format): placeholder implementation that currently returns
    the operand unchanged; the format argument is stored but not yet applied.
    '''
    def __init__(self, value, format_text, line, column):
        self.value = value
        # BUG FIX: the original did `self.value = format_text`, overwriting the
        # operand with the format string; keep them in separate attributes.
        self.format_text = format_text
        self.alias = f'ENCODE({self.value.alias})'
        self.line = line
        self.column = column
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                result = [str(columns) for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                return PrimitiveData(DATA_TYPE.STRING, cadena, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Encode"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self):
        # Three-address code generation not implemented yet.
        pass
class Decode(Expression):
    '''
    DECODE(value, format): placeholder implementation that currently returns
    the operand unchanged; the format argument is stored but not yet applied.
    '''
    def __init__(self, value, format_text, line, column):
        self.value = value
        # BUG FIX: the original did `self.value = format_text`, overwriting the
        # operand with the format string; keep them in separate attributes.
        self.format_text = format_text
        self.alias = f'DECODE({self.value.alias})'
        self.line = line
        self.column = column
        self._tac = self.alias
    def __repr__(self):
        return str(vars(self))
    def process(self, environment):
        try:
            if isinstance(self.value, Identifiers):
                lista1 = []
                val = self.value.process(environment)
                result = [str(columns) for columns in val[0]]
                lista1.append(result)
                lista1.append(self.alias)
                return lista1
            else:
                cadena = self.value.process(environment).value
                return PrimitiveData(DATA_TYPE.STRING, cadena, self.line, self.column)
        except TypeError:
            desc = "Tipo de dato invalido para Decode"
            ErrorController().add(37, 'Execution', desc, self.line, self.column)
            return
        except:
            desc = "FATAL ERROR --- StringFuncs"
            ErrorController().add(34, 'Execution', desc, self.line, self.column)
    def compile(self):
        # Three-address code generation not implemented yet.
        pass
| 37.285714
| 113
| 0.543984
| 2,586
| 25,839
| 5.382831
| 0.054524
| 0.051724
| 0.062356
| 0.065948
| 0.940086
| 0.928592
| 0.923491
| 0.918966
| 0.918966
| 0.909914
| 0
| 0.010154
| 0.348272
| 25,839
| 692
| 114
| 37.339595
| 0.816449
| 0.064515
| 0
| 0.870036
| 0
| 0
| 0.092859
| 0.017748
| 0
| 0
| 0
| 0.001445
| 0
| 1
| 0.079422
| false
| 0.00361
| 0.00722
| 0.021661
| 0.207581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e00e8d7098d6c635be87a8bcf5754d3f09130d57
| 34,928
|
py
|
Python
|
models/networks.py
|
Yangzhen0000/InstColorization
|
a55f0c02c7db82e9d9175c2093b934570daa2fa4
|
[
"MIT"
] | 1
|
2021-02-08T02:13:12.000Z
|
2021-02-08T02:13:12.000Z
|
models/networks.py
|
Yangzhen0000/InstColorization
|
a55f0c02c7db82e9d9175c2093b934570daa2fa4
|
[
"MIT"
] | null | null | null |
models/networks.py
|
Yangzhen0000/InstColorization
|
a55f0c02c7db82e9d9175c2093b934570daa2fa4
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import init
import functools
import torch.nn.functional as F
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer constructor for the given name.

    'batch'    -> functools.partial(nn.BatchNorm2d, affine=True)
    'instance' -> functools.partial(nn.InstanceNorm2d, affine=False)
    'none'     -> None
    Any other name raises NotImplementedError.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def init_weights(net, init_type='xavier', gain=0.02):
    """Initialize every Conv/Linear weight of ``net`` with the chosen scheme.

    Biases of matched layers are zeroed; BatchNorm2d weights are drawn from
    N(1.0, gain) with zero bias.  Unknown ``init_type`` raises
    NotImplementedError (only when a Conv/Linear module is encountered).
    """
    def init_func(m):
        cls_name = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
def init_net(net, init_type='xavier', gpu_ids=[]):
    """Place ``net`` on the requested GPUs (wrapping in DataParallel when
    ``gpu_ids`` is non-empty), then initialize its weights and return it.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type)
    return net
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='xavier', gpu_ids=[], use_tanh=True, classification=True):
    """Construct and initialize a generator.

    ``which_model_netG`` selects one of 'siggraph', 'instance', 'fusion';
    any other name raises NotImplementedError.  ``ngf`` and ``use_dropout``
    are accepted for interface compatibility but unused by these models.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    builders = {
        'siggraph': SIGGRAPHGenerator,
        'instance': InstanceGenerator,
        'fusion': FusionGenerator,
    }
    builder = builders.get(which_model_netG)
    if builder is None:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    netG = builder(input_nc, output_nc, norm_layer=norm_layer, use_tanh=use_tanh, classification=classification)
    return init_net(netG, init_type, gpu_ids)
class SIGGRAPHGenerator(nn.Module):
    """U-Net-style colorization generator.

    Encoder (model1-4, downsampled via strided slicing in forward), dilated
    mid blocks (model5-6), and a decoder (model8-10) with learned shortcut
    connections (model{3,2,1}short{8,9,10}).  Two heads: a 529-way
    classification head on conv8 features and a 2-channel regression head
    (optionally Tanh-bounded).  NOTE(review): the three forward inputs are
    concatenated along channels; presumably grayscale + color hints + hint
    mask -- confirm against the caller.
    """
    def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, use_tanh=True, classification=True):
        """Build all sub-networks.

        input_nc: channels of the concatenated (input_A, input_B, mask_B).
        output_nc: stored only; the regression head always emits 2 channels.
        norm_layer: normalization constructor applied at each stage's end.
        use_tanh: append Tanh to the regression head.
        classification: controls which head is gradient-detached in forward.
        """
        super(SIGGRAPHGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.classification = classification
        use_bias = True
        # Conv1
        # model1=[nn.ReflectionPad2d(1),]
        model1=[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model1+=[norm_layer(64),]
        model1+=[nn.ReLU(True),]
        # model1+=[nn.ReflectionPad2d(1),]
        model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model1+=[nn.ReLU(True),]
        model1+=[norm_layer(64),]
        # add a subsampling operation
        # Conv2
        # model2=[nn.ReflectionPad2d(1),]
        model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model2+=[norm_layer(128),]
        model2+=[nn.ReLU(True),]
        # model2+=[nn.ReflectionPad2d(1),]
        model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model2+=[nn.ReLU(True),]
        model2+=[norm_layer(128),]
        # add a subsampling layer operation
        # Conv3
        # model3=[nn.ReflectionPad2d(1),]
        model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model3+=[nn.ReLU(True),]
        model3+=[norm_layer(256),]
        # add a subsampling layer operation
        # Conv4
        # model47=[nn.ReflectionPad2d(1),]
        model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model4+=[nn.ReLU(True),]
        model4+=[norm_layer(512),]
        # Conv5 (dilated: enlarges receptive field without downsampling)
        # model47+=[nn.ReflectionPad2d(2),]
        model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model5+=[nn.ReLU(True),]
        model5+=[norm_layer(512),]
        # Conv6 (dilated)
        # model6+=[nn.ReflectionPad2d(2),]
        model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model6+=[nn.ReLU(True),]
        model6+=[norm_layer(512),]
        # Conv7
        # model47+=[nn.ReflectionPad2d(1),]
        model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model7+=[nn.ReLU(True),]
        model7+=[norm_layer(512),]
        # Conv8: transpose-conv upsample + shortcut from conv3 (original comment said "Conv7")
        model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=use_bias)]
        # model3short8=[nn.ReflectionPad2d(1),]
        model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model47+=[norm_layer(256),]
        model8=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model8+=[norm_layer(256),]
        model8+=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model8+=[nn.ReLU(True),]
        model8+=[norm_layer(256),]
        # Conv9
        model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model2short9=[nn.ReflectionPad2d(1),]
        model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        # model9=[norm_layer(128),]
        model9=[nn.ReLU(True),]
        # model9+=[nn.ReflectionPad2d(1),]
        model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model9+=[nn.ReLU(True),]
        model9+=[norm_layer(128),]
        # Conv10
        model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model1short10=[nn.ReflectionPad2d(1),]
        model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        # model10=[norm_layer(128),]
        model10=[nn.ReLU(True),]
        # model10+=[nn.ReflectionPad2d(1),]
        model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=use_bias),]
        model10+=[nn.LeakyReLU(negative_slope=.2),]
        # classification output
        model_class=[nn.Conv2d(256, 529, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        # regression output
        model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        if(use_tanh):
            model_out+=[nn.Tanh()]
        self.model1 = nn.Sequential(*model1)
        self.model2 = nn.Sequential(*model2)
        self.model3 = nn.Sequential(*model3)
        self.model4 = nn.Sequential(*model4)
        self.model5 = nn.Sequential(*model5)
        self.model6 = nn.Sequential(*model6)
        self.model7 = nn.Sequential(*model7)
        self.model8up = nn.Sequential(*model8up)
        self.model8 = nn.Sequential(*model8)
        self.model9up = nn.Sequential(*model9up)
        self.model9 = nn.Sequential(*model9)
        self.model10up = nn.Sequential(*model10up)
        self.model10 = nn.Sequential(*model10)
        self.model3short8 = nn.Sequential(*model3short8)
        self.model2short9 = nn.Sequential(*model2short9)
        self.model1short10 = nn.Sequential(*model1short10)
        self.model_class = nn.Sequential(*model_class)
        self.model_out = nn.Sequential(*model_out)
        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'),])
        self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
    def forward(self, input_A, input_B, mask_B):
        """Return (out_class, out_reg).

        Inputs are concatenated along channels; each encoder stage after the
        first consumes a 2x-subsampled (strided-slice) copy of the previous
        output.  When self.classification is True the decoder runs on
        detached encoder features (class head trains the encoder); otherwise
        the class head is detached instead (regression trains the encoder).
        """
        conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
        conv2_2 = self.model2(conv1_2[:,:,::2,::2])
        conv3_3 = self.model3(conv2_2[:,:,::2,::2])
        conv4_3 = self.model4(conv3_3[:,:,::2,::2])
        conv5_3 = self.model5(conv4_3)
        conv6_3 = self.model6(conv5_3)
        conv7_3 = self.model7(conv6_3)
        conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
        conv8_3 = self.model8(conv8_up)
        if(self.classification):
            out_class = self.model_class(conv8_3)
            conv9_up = self.model9up(conv8_3.detach()) + self.model2short9(conv2_2.detach())
            conv9_3 = self.model9(conv9_up)
            conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2.detach())
            conv10_2 = self.model10(conv10_up)
            out_reg = self.model_out(conv10_2)
        else:
            out_class = self.model_class(conv8_3.detach())
            conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
            conv9_3 = self.model9(conv9_up)
            conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
            conv10_2 = self.model10(conv10_up)
            out_reg = self.model_out(conv10_2)
        return (out_class, out_reg)
class FusionGenerator(nn.Module):
    """SIGGRAPHGenerator variant that fuses per-instance features into the
    full-image pass.

    The backbone is identical to SIGGRAPHGenerator, but after every stage a
    WeightGenerator (``weight_layer*``) blends externally supplied instance
    features (``instance_feature`` dict) into the current feature map using
    per-box soft masks.  forward returns only the regression output.
    """
    def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, use_tanh=True, classification=True):
        """Build backbone sub-networks plus one WeightGenerator per fusion
        point (channel count matches the stage it fuses into)."""
        super(FusionGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.classification = classification
        use_bias = True
        # Conv1
        # model1=[nn.ReflectionPad2d(1),]
        model1=[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model1+=[norm_layer(64),]
        model1+=[nn.ReLU(True),]
        # model1+=[nn.ReflectionPad2d(1),]
        model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model1+=[nn.ReLU(True),]
        model1+=[norm_layer(64),]
        # add a subsampling operation
        self.weight_layer = WeightGenerator(64)
        # Conv2
        # model2=[nn.ReflectionPad2d(1),]
        model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model2+=[norm_layer(128),]
        model2+=[nn.ReLU(True),]
        # model2+=[nn.ReflectionPad2d(1),]
        model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model2+=[nn.ReLU(True),]
        model2+=[norm_layer(128),]
        # add a subsampling layer operation
        self.weight_layer2 = WeightGenerator(128)
        # Conv3
        # model3=[nn.ReflectionPad2d(1),]
        model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model3+=[nn.ReLU(True),]
        model3+=[norm_layer(256),]
        # add a subsampling layer operation
        self.weight_layer3 = WeightGenerator(256)
        # Conv4
        # model47=[nn.ReflectionPad2d(1),]
        model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model4+=[nn.ReLU(True),]
        model4+=[norm_layer(512),]
        self.weight_layer4 = WeightGenerator(512)
        # Conv5 (dilated)
        # model47+=[nn.ReflectionPad2d(2),]
        model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model5+=[nn.ReLU(True),]
        model5+=[norm_layer(512),]
        self.weight_layer5 = WeightGenerator(512)
        # Conv6 (dilated)
        # model6+=[nn.ReflectionPad2d(2),]
        model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model6+=[nn.ReLU(True),]
        model6+=[norm_layer(512),]
        self.weight_layer6 = WeightGenerator(512)
        # Conv7
        # model47+=[nn.ReflectionPad2d(1),]
        model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model7+=[nn.ReLU(True),]
        model7+=[norm_layer(512),]
        self.weight_layer7 = WeightGenerator(512)
        # Conv8: transpose-conv upsample + shortcut from conv3 (original comment said "Conv7")
        model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=use_bias)]
        # model3short8=[nn.ReflectionPad2d(1),]
        model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        self.weight_layer8_1 = WeightGenerator(256)
        # model47+=[norm_layer(256),]
        model8=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model8+=[norm_layer(256),]
        model8+=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model8+=[nn.ReLU(True),]
        model8+=[norm_layer(256),]
        self.weight_layer8_2 = WeightGenerator(256)
        # Conv9
        model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model2short9=[nn.ReflectionPad2d(1),]
        model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        self.weight_layer9_1 = WeightGenerator(128)
        # model9=[norm_layer(128),]
        model9=[nn.ReLU(True),]
        # model9+=[nn.ReflectionPad2d(1),]
        model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model9+=[nn.ReLU(True),]
        model9+=[norm_layer(128),]
        self.weight_layer9_2 = WeightGenerator(128)
        # Conv10
        model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model1short10=[nn.ReflectionPad2d(1),]
        model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        self.weight_layer10_1 = WeightGenerator(128)
        # model10=[norm_layer(128),]
        model10=[nn.ReLU(True),]
        # model10+=[nn.ReflectionPad2d(1),]
        model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=use_bias),]
        model10+=[nn.LeakyReLU(negative_slope=.2),]
        self.weight_layer10_2 = WeightGenerator(128)
        # classification output
        model_class=[nn.Conv2d(256, 529, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        # regression output
        model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        if(use_tanh):
            model_out+=[nn.Tanh()]
        self.weight_layerout = WeightGenerator(2)
        self.model1 = nn.Sequential(*model1)
        self.model2 = nn.Sequential(*model2)
        self.model3 = nn.Sequential(*model3)
        self.model4 = nn.Sequential(*model4)
        self.model5 = nn.Sequential(*model5)
        self.model6 = nn.Sequential(*model6)
        self.model7 = nn.Sequential(*model7)
        self.model8up = nn.Sequential(*model8up)
        self.model8 = nn.Sequential(*model8)
        self.model9up = nn.Sequential(*model9up)
        self.model9 = nn.Sequential(*model9)
        self.model10up = nn.Sequential(*model10up)
        self.model10 = nn.Sequential(*model10)
        self.model3short8 = nn.Sequential(*model3short8)
        self.model2short9 = nn.Sequential(*model2short9)
        self.model1short10 = nn.Sequential(*model1short10)
        self.model_class = nn.Sequential(*model_class)
        self.model_out = nn.Sequential(*model_out)
        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'),])
        self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
    def forward(self, input_A, input_B, mask_B, instance_feature, box_info_list):
        """Return the fused regression output.

        instance_feature: dict of per-instance feature maps keyed by stage
        name (matches the keys InstanceGenerator.forward exposes).
        box_info_list: one box-geometry entry per resolution level; index
        selects the level matching each stage's spatial size (0 = full res,
        3 = 1/8 res -- inferred from the subsampling pattern below).
        """
        conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
        conv1_2 = self.weight_layer(instance_feature['conv1_2'], conv1_2, box_info_list[0])
        conv2_2 = self.model2(conv1_2[:,:,::2,::2])
        conv2_2 = self.weight_layer2(instance_feature['conv2_2'], conv2_2, box_info_list[1])
        conv3_3 = self.model3(conv2_2[:,:,::2,::2])
        conv3_3 = self.weight_layer3(instance_feature['conv3_3'], conv3_3, box_info_list[2])
        conv4_3 = self.model4(conv3_3[:,:,::2,::2])
        conv4_3 = self.weight_layer4(instance_feature['conv4_3'], conv4_3, box_info_list[3])
        conv5_3 = self.model5(conv4_3)
        conv5_3 = self.weight_layer5(instance_feature['conv5_3'], conv5_3, box_info_list[3])
        conv6_3 = self.model6(conv5_3)
        conv6_3 = self.weight_layer6(instance_feature['conv6_3'], conv6_3, box_info_list[3])
        conv7_3 = self.model7(conv6_3)
        conv7_3 = self.weight_layer7(instance_feature['conv7_3'], conv7_3, box_info_list[3])
        conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
        conv8_up = self.weight_layer8_1(instance_feature['conv8_up'], conv8_up, box_info_list[2])
        conv8_3 = self.model8(conv8_up)
        conv8_3 = self.weight_layer8_2(instance_feature['conv8_3'], conv8_3, box_info_list[2])
        conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
        conv9_up = self.weight_layer9_1(instance_feature['conv9_up'], conv9_up, box_info_list[1])
        conv9_3 = self.model9(conv9_up)
        conv9_3 = self.weight_layer9_2(instance_feature['conv9_3'], conv9_3, box_info_list[1])
        conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
        conv10_up = self.weight_layer10_1(instance_feature['conv10_up'], conv10_up, box_info_list[0])
        conv10_2 = self.model10(conv10_up)
        conv10_2 = self.weight_layer10_2(instance_feature['conv10_2'], conv10_2, box_info_list[0])
        out_reg = self.model_out(conv10_2)
        return out_reg
class WeightGenerator(nn.Module):
    """Predicts soft blending masks that fuse per-instance feature crops
    into a full-image ("background") feature map of the same channel count.
    """

    def __init__(self, input_ch, inner_ch=16):
        super(WeightGenerator, self).__init__()
        # NOTE: attribute names and construction order are part of the
        # checkpoint/state-dict contract -- keep them stable.
        self.simple_instance_conv = self._mask_head(input_ch, inner_ch)
        self.simple_bg_conv = self._mask_head(input_ch, inner_ch)
        self.normalize = nn.Softmax(1)

    @staticmethod
    def _mask_head(input_ch, inner_ch):
        """Three conv+ReLU layers mapping input_ch channels to 1 mask logit."""
        return nn.Sequential(
            nn.Conv2d(input_ch, inner_ch, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(inner_ch, inner_ch, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(inner_ch, 1, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
        )

    def resize_and_pad(self, feauture_maps, info_array):
        """Bilinearly resize to (h, w) = (info_array[5], info_array[4]) and
        zero-pad by (left, right, top, bottom) = info_array[0:4]."""
        resized = torch.nn.functional.interpolate(
            feauture_maps, size=(info_array[5], info_array[4]), mode='bilinear')
        return torch.nn.functional.pad(
            resized,
            (info_array[0], info_array[1], info_array[2], info_array[3]),
            "constant", 0)

    def forward(self, instance_feature, bg_feature, box_info):
        """Blend each instance crop into bg_feature via softmax-normalized
        predicted masks; regions not covered by any box get a large constant
        boost on the background logit so the background wins there."""
        pred_masks = []
        padded_crops = []
        covered = torch.zeros_like(bg_feature)[:1, :1]
        for idx in range(instance_feature.shape[0]):
            crop = torch.unsqueeze(instance_feature[idx], 0)
            mask_logits = self.simple_instance_conv(crop)
            pred_masks.append(self.resize_and_pad(mask_logits, box_info[idx]))
            padded_crops.append(self.resize_and_pad(crop, box_info[idx]))
            box_mask = torch.zeros_like(bg_feature)[:1, :1]
            box_mask[0, 0,
                     box_info[idx][2]:box_info[idx][2] + box_info[idx][5],
                     box_info[idx][0]:box_info[idx][0] + box_info[idx][4]] = 1.0
            device = box_mask.device
            box_mask = box_mask.type(torch.FloatTensor).to(device)
            covered = torch.clamp(covered + box_mask, 0.0, 1.0)
        bg_logits = self.simple_bg_conv(bg_feature)
        pred_masks.append(bg_logits + (1 - covered) * 100000.0)
        weights = self.normalize(torch.cat(pred_masks, 1))
        weights_maskout = weights.clone()
        instance_mask = torch.clamp(
            torch.sum(weights_maskout[:, :instance_feature.shape[0]], 1, keepdim=True),
            0.0, 1.0)
        padded_crops.append(bg_feature)
        stacked = torch.cat(padded_crops, 0)
        weights_maskout = weights_maskout.permute(1, 0, 2, 3).contiguous()
        fused = torch.sum(stacked * weights_maskout, 0, keepdim=True)
        return fused  # , instance_mask, torch.clamp(weights, 0.0, 1.0)
class InstanceGenerator(nn.Module):
    """Per-instance generator: backbone identical to SIGGRAPHGenerator, but
    forward additionally returns a dict of every intermediate feature map
    (the keys FusionGenerator.forward consumes as ``instance_feature``).
    """
    def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, use_tanh=True, classification=True):
        """Build all sub-networks; see SIGGRAPHGenerator for the layout."""
        super(InstanceGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.classification = classification
        use_bias = True
        # Conv1
        # model1=[nn.ReflectionPad2d(1),]
        model1=[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model1+=[norm_layer(64),]
        model1+=[nn.ReLU(True),]
        # model1+=[nn.ReflectionPad2d(1),]
        model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model1+=[nn.ReLU(True),]
        model1+=[norm_layer(64),]
        # add a subsampling operation
        # Conv2
        # model2=[nn.ReflectionPad2d(1),]
        model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model2+=[norm_layer(128),]
        model2+=[nn.ReLU(True),]
        # model2+=[nn.ReflectionPad2d(1),]
        model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model2+=[nn.ReLU(True),]
        model2+=[norm_layer(128),]
        # add a subsampling layer operation
        # Conv3
        # model3=[nn.ReflectionPad2d(1),]
        model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model3+=[norm_layer(256),]
        model3+=[nn.ReLU(True),]
        # model3+=[nn.ReflectionPad2d(1),]
        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model3+=[nn.ReLU(True),]
        model3+=[norm_layer(256),]
        # add a subsampling layer operation
        # Conv4
        # model47=[nn.ReflectionPad2d(1),]
        model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model4+=[norm_layer(512),]
        model4+=[nn.ReLU(True),]
        # model4+=[nn.ReflectionPad2d(1),]
        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model4+=[nn.ReLU(True),]
        model4+=[norm_layer(512),]
        # Conv5 (dilated)
        # model47+=[nn.ReflectionPad2d(2),]
        model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model5+=[norm_layer(512),]
        model5+=[nn.ReLU(True),]
        # model5+=[nn.ReflectionPad2d(2),]
        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model5+=[nn.ReLU(True),]
        model5+=[norm_layer(512),]
        # Conv6 (dilated)
        # model6+=[nn.ReflectionPad2d(2),]
        model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        # model6+=[norm_layer(512),]
        model6+=[nn.ReLU(True),]
        # model6+=[nn.ReflectionPad2d(2),]
        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias),]
        model6+=[nn.ReLU(True),]
        model6+=[norm_layer(512),]
        # Conv7
        # model47+=[nn.ReflectionPad2d(1),]
        model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model7+=[norm_layer(512),]
        model7+=[nn.ReLU(True),]
        # model7+=[nn.ReflectionPad2d(1),]
        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model7+=[nn.ReLU(True),]
        model7+=[norm_layer(512),]
        # Conv8: transpose-conv upsample + shortcut from conv3 (original comment said "Conv7")
        model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=use_bias)]
        # model3short8=[nn.ReflectionPad2d(1),]
        model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model47+=[norm_layer(256),]
        model8=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # model8+=[norm_layer(256),]
        model8+=[nn.ReLU(True),]
        # model8+=[nn.ReflectionPad2d(1),]
        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model8+=[nn.ReLU(True),]
        model8+=[norm_layer(256),]
        # Conv9
        model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model2short9=[nn.ReflectionPad2d(1),]
        model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        # model9=[norm_layer(128),]
        model9=[nn.ReLU(True),]
        # model9+=[nn.ReflectionPad2d(1),]
        model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        model9+=[nn.ReLU(True),]
        model9+=[norm_layer(128),]
        # Conv10
        model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=use_bias),]
        # model1short10=[nn.ReflectionPad2d(1),]
        model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias),]
        # add the two feature maps above
        # model10=[norm_layer(128),]
        model10=[nn.ReLU(True),]
        # model10+=[nn.ReflectionPad2d(1),]
        model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=use_bias),]
        model10+=[nn.LeakyReLU(negative_slope=.2),]
        # classification output
        model_class=[nn.Conv2d(256, 529, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        # regression output
        model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias),]
        if(use_tanh):
            model_out+=[nn.Tanh()]
        self.model1 = nn.Sequential(*model1)
        self.model2 = nn.Sequential(*model2)
        self.model3 = nn.Sequential(*model3)
        self.model4 = nn.Sequential(*model4)
        self.model5 = nn.Sequential(*model5)
        self.model6 = nn.Sequential(*model6)
        self.model7 = nn.Sequential(*model7)
        self.model8up = nn.Sequential(*model8up)
        self.model8 = nn.Sequential(*model8)
        self.model9up = nn.Sequential(*model9up)
        self.model9 = nn.Sequential(*model9)
        self.model10up = nn.Sequential(*model10up)
        self.model10 = nn.Sequential(*model10)
        self.model3short8 = nn.Sequential(*model3short8)
        self.model2short9 = nn.Sequential(*model2short9)
        self.model1short10 = nn.Sequential(*model1short10)
        self.model_class = nn.Sequential(*model_class)
        self.model_out = nn.Sequential(*model_out)
        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'),])
        self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
    def forward(self, input_A, input_B, mask_B):
        """Return (out_reg, feature_map).

        Same dataflow and detach logic as SIGGRAPHGenerator.forward, but the
        classification output is dropped and every intermediate activation
        is collected into ``feature_map`` for later fusion.
        """
        conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
        conv2_2 = self.model2(conv1_2[:,:,::2,::2])
        conv3_3 = self.model3(conv2_2[:,:,::2,::2])
        conv4_3 = self.model4(conv3_3[:,:,::2,::2])
        conv5_3 = self.model5(conv4_3)
        conv6_3 = self.model6(conv5_3)
        conv7_3 = self.model7(conv6_3)
        conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
        conv8_3 = self.model8(conv8_up)
        if(self.classification):
            out_class = self.model_class(conv8_3)
            conv9_up = self.model9up(conv8_3.detach()) + self.model2short9(conv2_2.detach())
            conv9_3 = self.model9(conv9_up)
            conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2.detach())
            conv10_2 = self.model10(conv10_up)
            out_reg = self.model_out(conv10_2)
        else:
            out_class = self.model_class(conv8_3.detach())
            conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
            conv9_3 = self.model9(conv9_up)
            conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
            conv10_2 = self.model10(conv10_up)
            out_reg = self.model_out(conv10_2)
        feature_map = {}
        feature_map['conv1_2'] = conv1_2
        feature_map['conv2_2'] = conv2_2
        feature_map['conv3_3'] = conv3_3
        feature_map['conv4_3'] = conv4_3
        feature_map['conv5_3'] = conv5_3
        feature_map['conv6_3'] = conv6_3
        feature_map['conv7_3'] = conv7_3
        feature_map['conv8_up'] = conv8_up
        feature_map['conv8_3'] = conv8_3
        feature_map['conv9_up'] = conv9_up
        feature_map['conv9_3'] = conv9_3
        feature_map['conv10_up'] = conv10_up
        feature_map['conv10_2'] = conv10_2
        feature_map['out_reg'] = out_reg
        return (out_reg, feature_map)
| 43.38882
| 158
| 0.619045
| 4,656
| 34,928
| 4.454253
| 0.04811
| 0.047736
| 0.049327
| 0.043397
| 0.821399
| 0.790491
| 0.784319
| 0.774001
| 0.771686
| 0.768407
| 0
| 0.092284
| 0.22409
| 34,928
| 805
| 159
| 43.38882
| 0.672964
| 0.136424
| 0
| 0.717435
| 0
| 0
| 0.017442
| 0
| 0
| 0
| 0
| 0
| 0.002004
| 1
| 0.028056
| false
| 0
| 0.01002
| 0
| 0.062124
| 0.002004
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e01313147a1de6d91ec76a3a8e4bbf0ecfce37d7
| 36,607
|
py
|
Python
|
msgraph-cli-extensions/v1_0/education_v1_0/azext_education_v1_0/generated/_help.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/education_v1_0/azext_education_v1_0/generated/_help.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/education_v1_0/azext_education_v1_0/generated/_help.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
# NOTE(review): this file is AutoRest-generated ("Changes ... will be lost if the
# code is regenerated", per the header). The `gender:` description lines added
# under each --student parameter below fix help text whose Usage line listed
# `gender=XX` without describing it; the same fix should be ported to the code
# generator so it survives regeneration.
helps['education_v1_0'] = '''
    type: group
    short-summary: Manage Education
'''
helps['education education-root'] = """
    type: group
    short-summary: Manage education education root with education_v1_0
"""
helps['education education-root show-education-root'] = """
    type: command
    short-summary: "Get education."
"""
helps['education education-root update-education-root'] = """
    type: command
    short-summary: "Update education."
"""
helps['education education'] = """
    type: group
    short-summary: Manage education with education_v1_0
"""
helps['education education create-class'] = """
    type: command
    short-summary: "Create new navigation property to classes for education."
    parameters:
      - name: --term
        short-summary: "educationTerm"
        long-summary: |
          Usage: --term display-name=XX end-date=XX external-id=XX start-date=XX
            display-name: Display name of the term.
            end-date: End of the term.
            external-id: ID of term in the syncing system.
            start-date: Start of the term.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --user
        short-summary: "identity"
        long-summary: |
          Usage: --user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education create-school'] = """
    type: command
    short-summary: "Create new navigation property to schools for education."
    parameters:
      - name: --address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --user
        short-summary: "identity"
        long-summary: |
          Usage: --user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education create-user'] = """
    type: command
    short-summary: "Create new navigation property to users for education."
    parameters:
      - name: --assigned-licenses
        short-summary: "The licenses that are assigned to the user. Not nullable."
        long-summary: |
          Usage: --assigned-licenses disabled-plans=XX sku-id=XX
            disabled-plans: A collection of the unique identifiers for plans that have been disabled.
            sku-id: The unique identifier for the SKU.
          Multiple actions can be specified by using more than one --assigned-licenses argument.
      - name: --assigned-plans
        short-summary: "The plans that are assigned to the user. Read-only. Not nullable."
        long-summary: |
          Usage: --assigned-plans assigned-date-time=XX capability-status=XX service=XX service-plan-id=XX
            assigned-date-time: The date and time at which the plan was assigned; for example: 2013-01-02T19:32:30Z. \
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, \
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
            capability-status: For example, 'Enabled'.
            service: The name of the service; for example, 'Exchange'.
            service-plan-id: A GUID that identifies the service plan.
          Multiple actions can be specified by using more than one --assigned-plans argument.
      - name: --mailing-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --mailing-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --password-profile
        short-summary: "passwordProfile"
        long-summary: |
          Usage: --password-profile force-change-password-next-sign-in=XX force-change-password-next-sign-in-with-mfa\
=XX password=XX
            force-change-password-next-sign-in: true if the user must change her password on the next login; otherwise \
false.
            force-change-password-next-sign-in-with-mfa: If true, at next sign-in, the user must perform a \
multi-factor authentication (MFA) before being forced to change their password. The behavior is identical to \
forceChangePasswordNextSignIn except that the user is required to first perform a multi-factor authentication before \
password change. After a password change, this property will be automatically reset to false. If not set, default is \
false.
            password: The password for the user. This property is required when a user is created. It can be updated, \
but the user will be required to change the password on the next login. The password must satisfy minimum requirements \
as specified by the user’s passwordPolicies property. By default, a strong password is required.
      - name: --provisioned-plans
        short-summary: "The plans that are provisioned for the user. Read-only. Not nullable."
        long-summary: |
          Usage: --provisioned-plans capability-status=XX provisioning-status=XX service=XX
            capability-status: For example, 'Enabled'.
            provisioning-status: For example, 'Success'.
            service: The name of the service; for example, 'AccessControlS2S'
          Multiple actions can be specified by using more than one --provisioned-plans argument.
      - name: --residence-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --residence-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --student
        short-summary: "educationStudent"
        long-summary: |
          Usage: --student birth-date=XX external-id=XX gender=XX grade=XX graduation-year=XX student-number=XX
            birth-date: Birth date of the student.
            external-id: ID of the student in the source system.
            gender: Gender of the student.
            grade: Current grade level of the student.
            graduation-year: Year the student is graduating from the school.
            student-number: Student Number.
      - name: --teacher
        short-summary: "educationTeacher"
        long-summary: |
          Usage: --teacher external-id=XX teacher-number=XX
            external-id: ID of the teacher in the source system.
            teacher-number: Teacher number.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-user
        short-summary: "identity"
        long-summary: |
          Usage: --microsoft-graph-identity-user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education delete-class'] = """
    type: command
    short-summary: "Delete navigation property classes for education."
"""
helps['education education delete-me'] = """
    type: command
    short-summary: "Delete navigation property me for education."
"""
helps['education education delete-school'] = """
    type: command
    short-summary: "Delete navigation property schools for education."
"""
helps['education education delete-user'] = """
    type: command
    short-summary: "Delete navigation property users for education."
"""
helps['education education list-class'] = """
    type: command
    short-summary: "Get classes from education."
"""
helps['education education list-school'] = """
    type: command
    short-summary: "Get schools from education."
"""
helps['education education list-user'] = """
    type: command
    short-summary: "Get users from education."
"""
helps['education education show-class'] = """
    type: command
    short-summary: "Get classes from education."
"""
helps['education education show-me'] = """
    type: command
    short-summary: "Get me from education."
"""
helps['education education show-school'] = """
    type: command
    short-summary: "Get schools from education."
"""
helps['education education show-user'] = """
    type: command
    short-summary: "Get users from education."
"""
helps['education education update-class'] = """
    type: command
    short-summary: "Update the navigation property classes in education."
    parameters:
      - name: --term
        short-summary: "educationTerm"
        long-summary: |
          Usage: --term display-name=XX end-date=XX external-id=XX start-date=XX
            display-name: Display name of the term.
            end-date: End of the term.
            external-id: ID of term in the syncing system.
            start-date: Start of the term.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --user
        short-summary: "identity"
        long-summary: |
          Usage: --user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education update-me'] = """
    type: command
    short-summary: "Update the navigation property me in education."
    parameters:
      - name: --assigned-licenses
        short-summary: "The licenses that are assigned to the user. Not nullable."
        long-summary: |
          Usage: --assigned-licenses disabled-plans=XX sku-id=XX
            disabled-plans: A collection of the unique identifiers for plans that have been disabled.
            sku-id: The unique identifier for the SKU.
          Multiple actions can be specified by using more than one --assigned-licenses argument.
      - name: --assigned-plans
        short-summary: "The plans that are assigned to the user. Read-only. Not nullable."
        long-summary: |
          Usage: --assigned-plans assigned-date-time=XX capability-status=XX service=XX service-plan-id=XX
            assigned-date-time: The date and time at which the plan was assigned; for example: 2013-01-02T19:32:30Z. \
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, \
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
            capability-status: For example, 'Enabled'.
            service: The name of the service; for example, 'Exchange'.
            service-plan-id: A GUID that identifies the service plan.
          Multiple actions can be specified by using more than one --assigned-plans argument.
      - name: --mailing-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --mailing-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --password-profile
        short-summary: "passwordProfile"
        long-summary: |
          Usage: --password-profile force-change-password-next-sign-in=XX force-change-password-next-sign-in-with-mfa\
=XX password=XX
            force-change-password-next-sign-in: true if the user must change her password on the next login; otherwise \
false.
            force-change-password-next-sign-in-with-mfa: If true, at next sign-in, the user must perform a \
multi-factor authentication (MFA) before being forced to change their password. The behavior is identical to \
forceChangePasswordNextSignIn except that the user is required to first perform a multi-factor authentication before \
password change. After a password change, this property will be automatically reset to false. If not set, default is \
false.
            password: The password for the user. This property is required when a user is created. It can be updated, \
but the user will be required to change the password on the next login. The password must satisfy minimum requirements \
as specified by the user’s passwordPolicies property. By default, a strong password is required.
      - name: --provisioned-plans
        short-summary: "The plans that are provisioned for the user. Read-only. Not nullable."
        long-summary: |
          Usage: --provisioned-plans capability-status=XX provisioning-status=XX service=XX
            capability-status: For example, 'Enabled'.
            provisioning-status: For example, 'Success'.
            service: The name of the service; for example, 'AccessControlS2S'
          Multiple actions can be specified by using more than one --provisioned-plans argument.
      - name: --residence-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --residence-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --student
        short-summary: "educationStudent"
        long-summary: |
          Usage: --student birth-date=XX external-id=XX gender=XX grade=XX graduation-year=XX student-number=XX
            birth-date: Birth date of the student.
            external-id: ID of the student in the source system.
            gender: Gender of the student.
            grade: Current grade level of the student.
            graduation-year: Year the student is graduating from the school.
            student-number: Student Number.
      - name: --teacher
        short-summary: "educationTeacher"
        long-summary: |
          Usage: --teacher external-id=XX teacher-number=XX
            external-id: ID of the teacher in the source system.
            teacher-number: Teacher number.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-user
        short-summary: "identity"
        long-summary: |
          Usage: --microsoft-graph-identity-user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education update-school'] = """
    type: command
    short-summary: "Update the navigation property schools in education."
    parameters:
      - name: --address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --user
        short-summary: "identity"
        long-summary: |
          Usage: --user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education update-user'] = """
    type: command
    short-summary: "Update the navigation property users in education."
    parameters:
      - name: --assigned-licenses
        short-summary: "The licenses that are assigned to the user. Not nullable."
        long-summary: |
          Usage: --assigned-licenses disabled-plans=XX sku-id=XX
            disabled-plans: A collection of the unique identifiers for plans that have been disabled.
            sku-id: The unique identifier for the SKU.
          Multiple actions can be specified by using more than one --assigned-licenses argument.
      - name: --assigned-plans
        short-summary: "The plans that are assigned to the user. Read-only. Not nullable."
        long-summary: |
          Usage: --assigned-plans assigned-date-time=XX capability-status=XX service=XX service-plan-id=XX
            assigned-date-time: The date and time at which the plan was assigned; for example: 2013-01-02T19:32:30Z. \
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, \
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
            capability-status: For example, 'Enabled'.
            service: The name of the service; for example, 'Exchange'.
            service-plan-id: A GUID that identifies the service plan.
          Multiple actions can be specified by using more than one --assigned-plans argument.
      - name: --mailing-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --mailing-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --password-profile
        short-summary: "passwordProfile"
        long-summary: |
          Usage: --password-profile force-change-password-next-sign-in=XX force-change-password-next-sign-in-with-mfa\
=XX password=XX
            force-change-password-next-sign-in: true if the user must change her password on the next login; otherwise \
false.
            force-change-password-next-sign-in-with-mfa: If true, at next sign-in, the user must perform a \
multi-factor authentication (MFA) before being forced to change their password. The behavior is identical to \
forceChangePasswordNextSignIn except that the user is required to first perform a multi-factor authentication before \
password change. After a password change, this property will be automatically reset to false. If not set, default is \
false.
            password: The password for the user. This property is required when a user is created. It can be updated, \
but the user will be required to change the password on the next login. The password must satisfy minimum requirements \
as specified by the user’s passwordPolicies property. By default, a strong password is required.
      - name: --provisioned-plans
        short-summary: "The plans that are provisioned for the user. Read-only. Not nullable."
        long-summary: |
          Usage: --provisioned-plans capability-status=XX provisioning-status=XX service=XX
            capability-status: For example, 'Enabled'.
            provisioning-status: For example, 'Success'.
            service: The name of the service; for example, 'AccessControlS2S'
          Multiple actions can be specified by using more than one --provisioned-plans argument.
      - name: --residence-address
        short-summary: "physicalAddress"
        long-summary: |
          Usage: --residence-address city=XX country-or-region=XX postal-code=XX state=XX street=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            state: The state.
            street: The street.
      - name: --student
        short-summary: "educationStudent"
        long-summary: |
          Usage: --student birth-date=XX external-id=XX gender=XX grade=XX graduation-year=XX student-number=XX
            birth-date: Birth date of the student.
            external-id: ID of the student in the source system.
            gender: Gender of the student.
            grade: Current grade level of the student.
            graduation-year: Year the student is graduating from the school.
            student-number: Student Number.
      - name: --teacher
        short-summary: "educationTeacher"
        long-summary: |
          Usage: --teacher external-id=XX teacher-number=XX
            external-id: ID of the teacher in the source system.
            teacher-number: Teacher number.
      - name: --application
        short-summary: "identity"
        long-summary: |
          Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
          Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-user
        short-summary: "identity"
        long-summary: |
          Usage: --microsoft-graph-identity-user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
helps['education education-class'] = """
    type: group
    short-summary: Manage education class with education_v1_0
"""
helps['education education-class create-ref-member'] = """
    type: command
    short-summary: "Create new navigation property ref to members for education."
"""
helps['education education-class create-ref-school'] = """
    type: command
    short-summary: "Create new navigation property ref to schools for education."
"""
helps['education education-class create-ref-teacher'] = """
    type: command
    short-summary: "Create new navigation property ref to teachers for education."
"""
helps['education education-class delete-ref-group'] = """
    type: command
    short-summary: "Delete ref of navigation property group for education."
"""
helps['education education-class list-member'] = """
    type: command
    short-summary: "Get members from education."
"""
helps['education education-class list-ref-member'] = """
    type: command
    short-summary: "Get ref of members from education."
"""
helps['education education-class list-ref-school'] = """
    type: command
    short-summary: "Get ref of schools from education."
"""
helps['education education-class list-ref-teacher'] = """
    type: command
    short-summary: "Get ref of teachers from education."
"""
helps['education education-class list-school'] = """
    type: command
    short-summary: "Get schools from education."
"""
helps['education education-class list-teacher'] = """
    type: command
    short-summary: "Get teachers from education."
"""
helps['education education-class set-ref-group'] = """
    type: command
    short-summary: "Update the ref of navigation property group in education."
"""
helps['education education-class show-group'] = """
    type: command
    short-summary: "Get group from education."
"""
helps['education education-class show-ref-group'] = """
    type: command
    short-summary: "Get ref of group from education."
"""
helps['education education-me'] = """
    type: group
    short-summary: Manage education me with education_v1_0
"""
helps['education education-me create-ref-class'] = """
    type: command
    short-summary: "Create new navigation property ref to classes for education."
"""
helps['education education-me create-ref-school'] = """
    type: command
    short-summary: "Create new navigation property ref to schools for education."
"""
helps['education education-me delete-ref-user'] = """
    type: command
    short-summary: "Delete ref of navigation property user for education."
"""
helps['education education-me list-class'] = """
    type: command
    short-summary: "Get classes from education."
"""
helps['education education-me list-ref-class'] = """
    type: command
    short-summary: "Get ref of classes from education."
"""
helps['education education-me list-ref-school'] = """
    type: command
    short-summary: "Get ref of schools from education."
"""
helps['education education-me list-school'] = """
    type: command
    short-summary: "Get schools from education."
"""
helps['education education-me set-ref-user'] = """
    type: command
    short-summary: "Update the ref of navigation property user in education."
"""
helps['education education-me show-ref-user'] = """
    type: command
    short-summary: "Get ref of user from education."
"""
helps['education education-me show-user'] = """
    type: command
    short-summary: "Get user from education."
"""
helps['education education-school'] = """
    type: group
    short-summary: Manage education school with education_v1_0
"""
helps['education education-school create-ref-class'] = """
    type: command
    short-summary: "Create new navigation property ref to classes for education."
"""
helps['education education-school create-ref-user'] = """
    type: command
    short-summary: "Create new navigation property ref to users for education."
"""
helps['education education-school list-class'] = """
    type: command
    short-summary: "Get classes from education."
"""
helps['education education-school list-ref-class'] = """
    type: command
    short-summary: "Get ref of classes from education."
"""
helps['education education-school list-ref-user'] = """
    type: command
    short-summary: "Get ref of users from education."
"""
helps['education education-school list-user'] = """
    type: command
    short-summary: "Get users from education."
"""
helps['education education-user'] = """
    type: group
    short-summary: Manage education user with education_v1_0
"""
helps['education education-user create-ref-class'] = """
    type: command
    short-summary: "Create new navigation property ref to classes for education."
"""
helps['education education-user create-ref-school'] = """
    type: command
    short-summary: "Create new navigation property ref to schools for education."
"""
helps['education education-user delete-ref-user'] = """
    type: command
    short-summary: "Delete ref of navigation property user for education."
"""
helps['education education-user list-class'] = """
    type: command
    short-summary: "Get classes from education."
"""
helps['education education-user list-ref-class'] = """
    type: command
    short-summary: "Get ref of classes from education."
"""
helps['education education-user list-ref-school'] = """
    type: command
    short-summary: "Get ref of schools from education."
"""
helps['education education-user list-school'] = """
    type: command
    short-summary: "Get schools from education."
"""
helps['education education-user set-ref-user'] = """
    type: command
    short-summary: "Update the ref of navigation property user in education."
"""
helps['education education-user show-ref-user'] = """
    type: command
    short-summary: "Get ref of user from education."
"""
helps['education education-user show-user'] = """
    type: command
    short-summary: "Get user from education."
"""
| 43.945978
| 121
| 0.654301
| 4,823
| 36,607
| 4.963094
| 0.052664
| 0.057651
| 0.062456
| 0.05669
| 0.979655
| 0.97389
| 0.929106
| 0.900907
| 0.889418
| 0.872875
| 0
| 0.004686
| 0.253777
| 36,607
| 832
| 122
| 43.998798
| 0.871582
| 0.012839
| 0
| 0.854885
| 0
| 0.146552
| 0.961268
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.051724
| 0.001437
| 0
| 0.001437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
e0377399789db3921cd770fd9836a624b22eb9da
| 180
|
py
|
Python
|
AbstractContext.py
|
nerdsupremacist/LlamaLang
|
84484b03eb24f326d7889c9dd4e9605d4eca9f8d
|
[
"MIT"
] | 5
|
2019-04-03T12:27:37.000Z
|
2022-02-18T08:20:12.000Z
|
AbstractContext.py
|
nerdsupremacist/LlamaLang
|
84484b03eb24f326d7889c9dd4e9605d4eca9f8d
|
[
"MIT"
] | 7
|
2016-05-26T13:46:33.000Z
|
2016-06-15T20:57:15.000Z
|
AbstractContext.py
|
mathiasquintero/LlamaLang
|
84484b03eb24f326d7889c9dd4e9605d4eca9f8d
|
[
"MIT"
] | null | null | null |
class AbstractContext(object):
    """Interface for variable-lookup contexts.

    Subclasses are expected to override these hooks; every default
    implementation simply returns ``None``.
    """

    def valueForVar(self, name):
        """Return the value bound to *name* (default: ``None``)."""
        return None

    def setValueForVar(self, name, ex):
        """Bind *name* to *ex* (default: no-op returning ``None``)."""
        return None

    def typeForVar(self, name):
        """Return the type associated with *name* (default: ``None``)."""
        return None
| 25.714286
| 39
| 0.633333
| 19
| 180
| 6
| 0.578947
| 0.210526
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 180
| 7
| 40
| 25.714286
| 0.876923
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.428571
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e044433cf9fa74390c5df16c593bb3908e704919
| 376
|
py
|
Python
|
hiector/ssrdd/utils/box/ext/rbbox_overlap_gpu/__init__.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | 3
|
2022-03-15T11:19:27.000Z
|
2022-03-24T15:59:49.000Z
|
hiector/ssrdd/utils/box/ext/rbbox_overlap_gpu/__init__.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | null | null | null |
hiector/ssrdd/utils/box/ext/rbbox_overlap_gpu/__init__.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | null | null | null |
# Import the compiled rotated-bbox helpers (IoU and NMS). The relative
# import works when this file is loaded as part of its package; when there
# is no package context (e.g. run as a script), fall back to an absolute
# import after putting this file's parent directory on sys.path.
try:
    from .rbbox_overlap import rbbox_overlaps as rbbox_iou
    from .rbbox_overlap import rotate_gpu_nms as rbbox_nms
except ImportError:
    import os
    import sys
    # Make the sibling extension importable without a package context.
    sys.path.insert(1, os.path.dirname(__file__))
    from rbbox_overlap_gpu.rbbox_overlap import rbbox_overlaps as rbbox_iou
    from rbbox_overlap_gpu.rbbox_overlap import rotate_gpu_nms as rbbox_nms
| 31.333333
| 75
| 0.792553
| 59
| 376
| 4.677966
| 0.338983
| 0.26087
| 0.231884
| 0.15942
| 0.73913
| 0.73913
| 0.73913
| 0.65942
| 0.65942
| 0.413043
| 0
| 0.003195
| 0.167553
| 376
| 11
| 76
| 34.181818
| 0.878594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.777778
| 0
| 0.777778
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e057a21a06c034334c86ae53bc546f4224e5491f
| 259
|
py
|
Python
|
cms/extensions/__init__.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 5,659
|
2015-01-01T02:42:30.000Z
|
2020-10-07T02:38:29.000Z
|
cms/extensions/__init__.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 3,264
|
2015-01-02T10:11:48.000Z
|
2020-10-08T13:15:07.000Z
|
cms/extensions/__init__.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 2,132
|
2015-01-01T11:28:21.000Z
|
2020-10-06T09:09:11.000Z
|
from .admin import PageExtensionAdmin # nopyflakes
from .admin import TitleExtensionAdmin # nopyflakes
from .extension_pool import extension_pool # nopyflakes
from .models import PageExtension # nopyflakes
from .models import TitleExtension # nopyflakes
| 43.166667
| 56
| 0.826255
| 27
| 259
| 7.851852
| 0.407407
| 0.264151
| 0.141509
| 0.245283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 259
| 5
| 57
| 51.8
| 0.946429
| 0.208494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e05dd2976a7abb4a190f553b8b4063619c02745c
| 77,291
|
py
|
Python
|
nnAudio2/Spectrogram.py
|
WangHelin1997/nnAudio_plus
|
8044eb3bb89d69aef33472e0e62a7b2533aca762
|
[
"MIT"
] | 17
|
2020-06-21T02:15:15.000Z
|
2022-03-23T00:57:47.000Z
|
nnAudio2/Spectrogram.py
|
WangHelin1997/nnAudio_plus
|
8044eb3bb89d69aef33472e0e62a7b2533aca762
|
[
"MIT"
] | 2
|
2020-06-15T03:38:31.000Z
|
2020-11-27T12:09:06.000Z
|
nnAudio2/Spectrogram.py
|
WangHelin1997/nnAudio_plus
|
8044eb3bb89d69aef33472e0e62a7b2533aca762
|
[
"MIT"
] | 4
|
2020-05-30T18:54:22.000Z
|
2022-03-23T00:57:49.000Z
|
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d
import numpy as np
import torch
from time import time
import math
from scipy.signal import get_window
from scipy import signal
from scipy import fft
import warnings
from .librosa_filters import mel,gammatone # Use it for PyPip
# from librosa_filters import mel # Use it for debug
sz_float = 4 # size of a float32 in bytes
epsilon = 10e-8 # fudge factor for numerical stability (10e-8 == 1e-7)
# ---------------------------Filter design -----------------------------------
def create_lowpass_filter(band_center=0.5, kernelLength=256, transitionBandwidth=0.03):
"""
calculate the highest frequency we need to preserve and the lowest frequency we allow to pass through. Note that frequency is on a scale from 0 to 1 where 0 is 0 and 1 is Nyquist frequency of the signal BEFORE downsampling
"""
# transitionBandwidth = 0.03
passbandMax = band_center / (1 + transitionBandwidth)
stopbandMin = band_center * (1 + transitionBandwidth)
# Unlike the filter tool we used online yesterday, this tool does
# not allow us to specify how closely the filter matches our
# specifications. Instead, we specify the length of the kernel.
# The longer the kernel is, the more precisely it will match.
# kernelLength = 256
# We specify a list of key frequencies for which we will require
# that the filter match a specific output gain.
# From [0.0 to passbandMax] is the frequency range we want to keep
# untouched and [stopbandMin, 1.0] is the range we want to remove
keyFrequencies = [0.0, passbandMax, stopbandMin, 1.0]
# We specify a list of output gains to correspond to the key
# frequencies listed above.
# The first two gains are 1.0 because they correspond to the first
# two key frequencies. the second two are 0.0 because they
# correspond to the stopband frequencies
gainAtKeyFrequencies = [1.0, 1.0, 0.0, 0.0]
# This command produces the filter kernel coefficients
filterKernel = signal.firwin2(kernelLength, keyFrequencies, gainAtKeyFrequencies)
return filterKernel.astype(np.float32)
def downsampling_by_n(x, filterKernel, n):
    """Downsample audio by an arbitrary integer factor ``n``.

    Used by CQT2010 and CQT2010v2.  Anti-aliasing is the caller's
    responsibility via ``filterKernel``.

    Parameters
    ----------
    x : torch.Tensor
        Input waveform, shape ``(batch, 1, len_audio)``.
    filterKernel : torch.Tensor
        Filter kernel, shape ``(1, 1, len_kernel)``.
    n : int
        The downsampling factor.

    Returns
    -------
    torch.Tensor
        The downsampled waveform.

    Examples
    --------
    >>> x_down = downsampling_by_n(x, filterKernel, 2)
    """
    # "Same"-style padding so the output length is roughly len_audio / n.
    pad = (filterKernel.shape[-1] - 1) // 2
    return conv1d(x, filterKernel, stride=n, padding=pad)
def downsampling_by_2(x, filterKernel):
    """Downsample audio by a factor of two.

    Used by CQT2010 and CQT2010v2.  Anti-aliasing is the caller's
    responsibility via ``filterKernel``.

    Parameters
    ----------
    x : torch.Tensor
        Input waveform, shape ``(batch, 1, len_audio)``.
    filterKernel : torch.Tensor
        Filter kernel, shape ``(1, 1, len_kernel)``.

    Returns
    -------
    torch.Tensor
        The downsampled waveform.

    Examples
    --------
    >>> x_down = downsampling_by_2(x, filterKernel)
    """
    # "Same"-style padding so the output length is roughly half the input.
    pad = (filterKernel.shape[-1] - 1) // 2
    return conv1d(x, filterKernel, stride=2, padding=pad)
## Basic tools for computation ##
def nextpow2(A):
    """Return the exponent of the next power of 2 at or above ``A``.

    NOTE: this returns the *exponent* ``e`` (the smallest integer with
    ``2**e >= A``), not the power of two itself — callers compute
    ``2**nextpow2(A)``.  The previous docstring example (``nextpow2(6) -> 8``)
    was wrong; the function has always returned the exponent.

    Parameters
    ----------
    A : float
        A number to be rounded up to the nearest power of 2.

    Returns
    -------
    int
        The smallest integer ``e`` such that ``2**e >= A``.

    Examples
    --------
    >>> nextpow2(6)
    3
    """
    return int(np.ceil(np.log2(A)))
def complex_mul(cqt_filter, stft):
    """Complex matrix multiplication for ``(real, imag)`` tensor pairs.

    PyTorch (at the time this was written) lacked native complex-number
    support, so the complex product is expanded manually:
    ``(a + bi)(c + di) = (ac - bd) + (ad + bc)i``.  Designed for CQT usage.

    Parameters
    ----------
    cqt_filter : tuple of torch.Tensor
        ``(real_torch_tensor, imag_torch_tensor)`` of the CQT filter.
    stft : tuple of torch.Tensor
        ``(real_torch_tensor, imag_torch_tensor)`` of the Fourier result.

    Returns
    -------
    tuple of torch.Tensor
        The product in ``(real_torch_tensor, imag_torch_tensor)`` form.
    """
    a, b = cqt_filter[0], cqt_filter[1]
    c, d = stft[0], stft[1]
    real_part = torch.matmul(a, c) - torch.matmul(b, d)
    imag_part = torch.matmul(a, d) + torch.matmul(b, c)
    return real_part, imag_part
def broadcast_dim(x):
    """Reshape ``x`` so it fits ``conv1d`` as ``(batch, 1, len)``.

    A 1-D ``(len,)`` input becomes ``(1, 1, len)``; a 2-D ``(batch, len)``
    input becomes ``(batch, 1, len)``; a 3-D input passes through unchanged.
    """
    ndim = x.dim()
    if ndim == 1:
        # NOTE: this broadcast is reported not to work under nn.DataParallel.
        return x[None, None, :]
    if ndim == 2:
        return x[:, None, :]
    if ndim == 3:
        return x
    raise ValueError("Only support input with shape = (batch, len) or shape = (len)")
def broadcast_dim_conv2d(x):
    """Insert a channel axis so a 3-D input fits ``conv2d``: ``(batch, 1, H, W)``."""
    if x.dim() != 3:
        raise ValueError("Only support input with shape = (batch, len) or shape = (len)")
    return x[:, None, :, :]
## Kernal generation functions ##
def create_fourier_kernels(n_fft, freq_bins=None, fmin=50, fmax=6000, sr=44100, freq_scale='linear', window='hann'):
    """Create the Fourier kernels for STFT, Melspectrogram and CQT.

    Most of the parameters follow librosa conventions. Part of the code
    comes from pytorch_musicnet. https://github.com/jthickstun/pytorch_musicnet

    Parameters
    ----------
    n_fft : int
        The window size
    freq_bins : int
        Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
    fmin : int
        The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument does nothing.
    fmax : int
        The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument does nothing.
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    freq_scale : 'linear', 'log', or 'no'
        Determine the spacing between each frequency bin. When 'linear' or 'log' is used, the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will start at 0Hz and end at Nyquist frequency with linear spacing.
    window : str
        Window name understood by ``scipy.signal.get_window``.

    Returns
    -------
    wsin : numpy.array
        Imaginary Fourier Kernel with the shape ``(freq_bins, 1, n_fft)``
    wcos : numpy.array
        Real Fourier Kernel with the shape ``(freq_bins, 1, n_fft)``
    bins2freq : list
        Mapping each frequency bin to frequency in Hz.
    binslist : list
        The normalized frequency ``k`` in the digital domain (the ``k`` in the
        Discrete Fourier Transform equation).

    Raises
    ------
    ValueError
        If ``freq_scale`` is not one of 'linear', 'log' or 'no'.
        (Previously an unknown scale only printed a message and returned
        uninitialized ``np.empty`` buffers — garbage kernels.)
    """
    if freq_bins is None:
        freq_bins = n_fft // 2 + 1

    s = np.arange(0, n_fft, 1.)
    wsin = np.empty((freq_bins, 1, n_fft))
    wcos = np.empty((freq_bins, 1, n_fft))
    start_freq = fmin
    end_freq = fmax
    bins2freq = []
    binslist = []

    # Window applied to every kernel row.
    window_mask = get_window(window, int(n_fft), fftbins=True)

    if freq_scale == 'linear':
        print("sampling rate = {}. Please make sure the sampling rate is correct in order to get a valid freq range".format(sr))
        start_bin = start_freq * n_fft / sr
        scaling_ind = (end_freq - start_freq) * (n_fft / sr) / freq_bins
        for k in range(freq_bins):  # Only half of the bins contain useful info
            bin_k = k * scaling_ind + start_bin
            bins2freq.append(bin_k * sr / n_fft)
            binslist.append(bin_k)
            wsin[k, 0, :] = window_mask * np.sin(2 * np.pi * bin_k * s / n_fft)
            wcos[k, 0, :] = window_mask * np.cos(2 * np.pi * bin_k * s / n_fft)
    elif freq_scale == 'log':
        print("sampling rate = {}. Please make sure the sampling rate is correct in order to get a valid freq range".format(sr))
        start_bin = start_freq * n_fft / sr
        scaling_ind = np.log(end_freq / start_freq) / freq_bins
        for k in range(freq_bins):  # Only half of the bins contain useful info
            bin_k = np.exp(k * scaling_ind) * start_bin
            bins2freq.append(bin_k * sr / n_fft)
            binslist.append(bin_k)
            wsin[k, 0, :] = window_mask * np.sin(2 * np.pi * bin_k * s / n_fft)
            wcos[k, 0, :] = window_mask * np.cos(2 * np.pi * bin_k * s / n_fft)
    elif freq_scale == 'no':
        for k in range(freq_bins):  # Only half of the bins contain useful info
            bins2freq.append(k * sr / n_fft)
            binslist.append(k)
            wsin[k, 0, :] = window_mask * np.sin(2 * np.pi * k * s / n_fft)
            wcos[k, 0, :] = window_mask * np.cos(2 * np.pi * k * s / n_fft)
    else:
        # BUG FIX: fail loudly instead of printing and returning the
        # uninitialized np.empty buffers.
        raise ValueError("Please select the correct frequency scale, 'linear' or 'log'")

    return wsin.astype(np.float32), wcos.astype(np.float32), bins2freq, binslist
def create_cqt_kernels(Q, fs, fmin, n_bins=84, bins_per_octave=12, norm=1, window='hann', fmax=None, topbin_check=True):
    """Create CQT kernels in the time domain (frequency conversion done later).

    Parameters
    ----------
    Q : float
        Quality factor; the kernel for frequency ``f`` spans ``Q * fs / f`` samples.
    fs : float
        Sampling rate.
    fmin : float
        Frequency of the lowest CQT bin.
    n_bins : int
        Number of CQT bins; recomputed from ``fmax`` when ``fmax`` is given and
        ``n_bins`` is ``None``.
    bins_per_octave : int
        Number of bins per octave.
    norm : int
        Norm order passed to ``np.linalg.norm`` to normalize each kernel;
        falsy skips normalization.
    window : str
        Window name understood by ``scipy.signal.get_window``.
    fmax : float or None
        Optional top frequency; when both ``fmax`` and ``n_bins`` are given,
        ``n_bins`` is recomputed and a ``SyntaxWarning`` is emitted.
    topbin_check : bool
        When ``True``, raise if the top bin exceeds the Nyquist frequency.

    Returns
    -------
    tempKernel : numpy.ndarray
        Complex64 kernels, shape ``(n_bins, fftLen)``.
    fftLen : int
        Kernel buffer length (a power of two).
    lengths : torch.Tensor
        Per-bin kernel lengths ``ceil(Q * fs / freq)``.
    """
    # Kernel length: next power of two that fits the longest (lowest-frequency) kernel.
    fftLen = 2 ** nextpow2(np.ceil(Q * fs / fmin))

    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float
    # (they were always the same type).
    if (fmax is not None) and (n_bins is None):
        n_bins = np.ceil(bins_per_octave * np.log2(fmax / fmin))  # Calculate the number of bins
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
    elif (fmax is None) and (n_bins is not None):
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
    else:
        warnings.warn('If fmax is given, n_bins will be ignored', SyntaxWarning)
        n_bins = np.ceil(bins_per_octave * np.log2(fmax / fmin))  # Calculate the number of bins
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))

    if np.max(freqs) > fs / 2 and topbin_check:
        raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, please reduce the n_bins'.format(np.max(freqs)))

    tempKernel = np.zeros((int(n_bins), int(fftLen)), dtype=np.complex64)
    # Loop-invariant: compute the per-bin lengths once (previously recomputed
    # every iteration, and undefined when n_bins == 0).
    lengths = np.ceil(Q * fs / freqs)

    for k in range(0, int(n_bins)):
        freq = freqs[k]
        l = np.ceil(Q * fs / freq)
        # Centering the kernel inside the fftLen-long buffer.
        if l % 2 == 1:  # pad more zeros on RHS
            start = int(np.ceil(fftLen / 2.0 - l / 2.0)) - 1
        else:
            start = int(np.ceil(fftLen / 2.0 - l / 2.0))
        sig = get_window(window, int(l), fftbins=True) * np.exp(np.r_[-l//2:l//2] * 1j * 2 * np.pi * freq / fs) / l
        if norm:  # Normalizing the filter # Trying to normalize like librosa
            tempKernel[k, start:start + int(l)] = sig / np.linalg.norm(sig, norm)
        else:
            tempKernel[k, start:start + int(l)] = sig
    return tempKernel, fftLen, torch.tensor(lengths).float()
def create_cqt_kernels_t(Q, fs, fmin, n_bins=84, bins_per_octave=12, norm=1, window='hann', fmax=None):
    """Create CQT kernels in the time domain.

    Same construction as :func:`create_cqt_kernels`, but the Nyquist check
    is always enforced (no ``topbin_check`` switch).

    Returns
    -------
    tempKernel : numpy.ndarray
        Complex64 kernels, shape ``(n_bins, fftLen)``.
    fftLen : int
        Kernel buffer length (a power of two).
    lengths : torch.Tensor
        Per-bin kernel lengths ``ceil(Q * fs / freq)``.
    """
    # Kernel length: next power of two that fits the longest (lowest-frequency) kernel.
    fftLen = 2 ** nextpow2(np.ceil(Q * fs / fmin))

    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    if (fmax is not None) and (n_bins is None):
        n_bins = np.ceil(bins_per_octave * np.log2(fmax / fmin))  # Calculate the number of bins
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
    elif (fmax is None) and (n_bins is not None):
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
    else:
        warnings.warn('If fmax is given, n_bins will be ignored', SyntaxWarning)
        n_bins = np.ceil(bins_per_octave * np.log2(fmax / fmin))  # Calculate the number of bins
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))

    if np.max(freqs) > fs / 2:
        raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, please reduce the n_bins'.format(np.max(freqs)))

    tempKernel = np.zeros((int(n_bins), int(fftLen)), dtype=np.complex64)
    # Loop-invariant: compute the per-bin lengths once.
    lengths = np.ceil(Q * fs / freqs)

    for k in range(0, int(n_bins)):
        freq = freqs[k]
        l = np.ceil(Q * fs / freq)
        # Centering the kernel inside the fftLen-long buffer.
        if l % 2 == 1:  # pad more zeros on RHS
            start = int(np.ceil(fftLen / 2.0 - l / 2.0)) - 1
        else:
            start = int(np.ceil(fftLen / 2.0 - l / 2.0))
        sig = get_window(window, int(l), fftbins=True) * np.exp(np.r_[-l//2:l//2] * 1j * 2 * np.pi * freq / fs) / l
        if norm:  # Normalizing the filter # Trying to normalize like librosa
            tempKernel[k, start:start + int(l)] = sig / np.linalg.norm(sig, norm)
        else:
            tempKernel[k, start:start + int(l)] = sig
    return tempKernel, fftLen, torch.tensor(lengths).float()
### ------------------Spectrogram Classes---------------------------###
class STFT(torch.nn.Module):
    """This function is to calculate the short-time Fourier transform (STFT) of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    freq_bins : int
        Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
    freq_scale : 'linear', 'log', or 'no'
        Determine the spacing between each frequency bin. When `linear` or `log` is used, the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will start at 0Hz and end at Nyquist frequency with linear spacing.
    center : bool
        Putting the STFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    fmin : int
        The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument does nothing.
    fmax : int
        The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument does nothing.
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    trainable : bool
        Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False``
    output_format : str
        Determine the return type. ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    device : str
        Choose which device to initialize this layer. Default value is 'cuda:0'

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if 'Magnitude' is used as the ``output_format``; Shape = ``(num_samples, freq_bins,time_steps, 2)`` if 'Complex' or 'Phase' are used as the ``output_format``

    Examples
    --------
    >>> spec_layer = Spectrogram.STFT()
    >>> specs = spec_layer(x)
    """
    def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050, trainable=False, output_format='Magnitude', verbose=True, device='cuda:0'):
        super(STFT, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        # (was assigned twice in the original — once is enough)
        self.trainable = trainable
        self.output_format = output_format
        self.device = device
        start = time()
        # Create filter windows for stft
        wsin, wcos, self.bins2freq, self.bin_list = create_fourier_kernels(n_fft, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr)
        self.wsin = torch.tensor(wsin, dtype=torch.float, device=self.device)
        self.wcos = torch.tensor(wcos, dtype=torch.float, device=self.device)
        # Making the kernels nn.Parameter so the model works with nn.DataParallel
        # and so they can be trained when `trainable` is True.
        self.wsin = torch.nn.Parameter(self.wsin, requires_grad=self.trainable)
        self.wcos = torch.nn.Parameter(self.wcos, requires_grad=self.trainable)
        if verbose == True:
            print("STFT kernels created, time used = {:.4f} seconds".format(time() - start))

    def forward(self, x):
        """Compute the STFT of ``x``; see the class docstring for shapes."""
        x = broadcast_dim(x)
        if self.center:
            # NOTE(review): a pad_mode other than 'constant'/'reflect' leaves
            # `padding` unbound and raises NameError — assumed unsupported.
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        spec_imag = conv1d(x, self.wsin, stride=self.stride)
        spec_real = conv1d(x, self.wcos, stride=self.stride)  # Doing STFT by using conv1d
        if self.output_format == 'Magnitude':
            spec = spec_real.pow(2) + spec_imag.pow(2)
            if self.trainable == True:
                return torch.sqrt(spec + 1e-8)  # prevent Nan gradient when sqrt(0) due to output=0
            else:
                return torch.sqrt(spec)
        elif self.output_format == 'Complex':
            return torch.stack((spec_real, -spec_imag), -1)  # Remember the minus sign for imaginary part
        elif self.output_format == 'Phase':
            return torch.atan2(-spec_imag + 0.0, spec_real)  # +0.0 removes -0.0 elements, which break the phase calculation

    def manual_forward(self, x):
        """Like ``forward`` but returns the squared real and imaginary parts separately."""
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        imag = conv1d(x, self.wsin, stride=self.stride).pow(2)
        real = conv1d(x, self.wcos, stride=self.stride).pow(2)  # Doing STFT by using conv1d
        return real, imag
class DFT(torch.nn.Module):
    """Discrete Fourier transform implemented with ``conv1d``.

    The inverse function only works for 1 single frame, i.e. input
    shape = (batch, n_fft, 1).
    """
    def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050):
        super(DFT, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        # Create filter windows for stft.
        # BUG FIX: create_fourier_kernels returns 4 values; the original
        # 3-name unpacking raised ValueError at construction time.
        wsin, wcos, self.bins2freq, _ = create_fourier_kernels(n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr)
        self.wsin = torch.tensor(wsin, dtype=torch.float)
        self.wcos = torch.tensor(wcos, dtype=torch.float)

    def forward(self, x):
        """Return ``(real, imag)`` of the DFT of ``x``."""
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        imag = conv1d(x, self.wsin, stride=self.stride)
        real = conv1d(x, self.wcos, stride=self.stride)
        return (real, -imag)

    def inverse(self, x_real, x_imag):
        """Inverse DFT of a single frame given its ``(real, imag)`` spectrum."""
        x_real = broadcast_dim(x_real)
        x_imag = broadcast_dim(x_imag)
        x_real.transpose_(1, 2)  # Prepare the right shape to do inverse
        x_imag.transpose_(1, 2)  # Prepare the right shape to do inverse
        # Watch out for the positive and negative signs:
        # ifft = e^(+2*pi*j) * X
        # ifft(X_real)      = (a1, a2)
        # ifft(X_imag) * 1j = (b1, b2) * 1j = (-b2, b1)
        a1 = conv1d(x_real, self.wcos, stride=self.stride)
        a2 = conv1d(x_real, self.wsin, stride=self.stride)
        b1 = conv1d(x_imag, self.wcos, stride=self.stride)
        b2 = conv1d(x_imag, self.wsin, stride=self.stride)
        imag = a2 + b1
        real = a1 - b2
        return (real / self.n_fft, imag / self.n_fft)
class iSTFT_complex_2d(torch.nn.Module):
    """Inverse STFT of a complex spectrogram, implemented with ``conv2d``."""
    def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050):
        super(iSTFT_complex_2d, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        # Create filter windows for stft.
        # BUG FIX: create_fourier_kernels returns 4 values; the original
        # 3-name unpacking raised ValueError at construction time.
        wsin, wcos, self.bins2freq, _ = create_fourier_kernels(n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr)
        self.wsin = torch.tensor(wsin, dtype=torch.float)
        self.wcos = torch.tensor(wcos, dtype=torch.float)
        # Append a trailing axis so the kernels fit into 2d Conv.
        self.wsin = self.wsin[:, :, :, None]
        self.wcos = self.wcos[:, :, :, None]

    def forward(self, x_real, x_imag):
        """Inverse transform; ``x_imag`` carries the conjugate sign."""
        x_real = broadcast_dim_conv2d(x_real)
        x_imag = broadcast_dim_conv2d(x_imag)  # taking conjuate
        # Watch out for the positive and negative signs:
        # ifft = e^(+2*pi*j) * X
        # ifft(X_real)      = (a1, a2)
        # ifft(X_imag) * 1j = (b1, b2) * 1j = (-b2, b1)
        a1 = conv2d(x_real, self.wcos, stride=(1, 1))
        a2 = conv2d(x_real, self.wsin, stride=(1, 1))
        b1 = conv2d(x_imag, self.wcos, stride=(1, 1))
        b2 = conv2d(x_imag, self.wsin, stride=(1, 1))
        imag = a2 + b1
        real = a1 - b2
        return (real / self.n_fft, imag / self.n_fft)
class MelSpectrogram(torch.nn.Module):
    """This function is to calculate the Melspectrogram of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    n_fft : int
        The window size for the STFT. Default value is 2048
    n_mels : int
        The number of Mel filter banks. The filter banks maps the n_fft to mel bins. Default value is 128
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
    center : bool
        Putting the STFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    htk : bool
        When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the Mel scale is logarithmic. The default value is ``False``
    fmin : int
        The starting frequency for the lowest Mel filter bank
    fmax : int
        The ending frequency for the highest Mel filter bank
    trainable_mel : bool
        Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel filter banks will also be caluclated and the Mel filter banks will be updated during model training. Default value is ``False``
    trainable_STFT : bool
        Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False``
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    device : str
        Choose which device to initialize this layer. Default value is 'cuda:0'

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.MelSpectrogram()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, device='cuda:0'):
        super(MelSpectrogram, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.device = device
        self.power = power
        # Create filter windows for stft
        start = time()
        wsin, wcos, self.bins2freq, _ = create_fourier_kernels(n_fft, freq_bins=None, window=window, freq_scale='no', sr=sr)
        self.wsin = torch.tensor(wsin, dtype=torch.float, device=self.device)
        self.wcos = torch.tensor(wcos, dtype=torch.float, device=self.device)
        # BUG FIX: `start` was reset before the Mel-filter creation, so the
        # "STFT filter created" message reported the Mel-filter time.
        stft_elapsed = time() - start
        # Creating kernel for mel spectrogram
        start = time()
        mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
        self.mel_basis = torch.tensor(mel_basis, device=self.device)
        mel_elapsed = time() - start
        if verbose == True:
            print("STFT filter created, time used = {:.4f} seconds".format(stft_elapsed))
            print("Mel filter created, time used = {:.4f} seconds".format(mel_elapsed))
        # Making everything nn.Parameter, so that this model can support nn.DataParallel
        self.mel_basis = torch.nn.Parameter(self.mel_basis, requires_grad=trainable_mel)
        self.wsin = torch.nn.Parameter(self.wsin, requires_grad=trainable_STFT)
        self.wcos = torch.nn.Parameter(self.wcos, requires_grad=trainable_STFT)

    def forward(self, x):
        """Return the Mel spectrogram of ``x``; see the class docstring for shapes."""
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        # STFT magnitude via conv1d, raised to `power`, then projected onto
        # the Mel filter banks.
        spec = torch.sqrt(conv1d(x, self.wsin, stride=self.stride).pow(2) \
            + conv1d(x, self.wcos, stride=self.stride).pow(2)) ** self.power  # Doing STFT by using conv1d
        melspec = torch.matmul(self.mel_basis, spec)
        return melspec
class Gammatonegram(torch.nn.Module):
"""This function is to calculate the Gammatonegram of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred autommatically if the input follows these 3 shapes. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
n_fft : int
The window size for the STFT. Default value is 2048
n_mels : int
The number of Gammatonegram filter banks. The filter banks maps the n_fft to Gammatone bins. Default value is 64
hop_length : int
The hop (or stride) size. Default value is 512.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
center : bool
Putting the STFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
htk : bool
When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the Mel scale is logarithmic. The default value is ``False``
fmin : int
The starting frequency for the lowest Gammatone filter bank
fmax : int
The ending frequency for the highest Gammatone filter bank
trainable_mel : bool
Determine if the Gammatone filter banks are trainable or not. If ``True``, the gradients for Mel filter banks will also be caluclated and the Mel filter banks will be updated during model training. Default value is ``False``
trainable_STFT : bool
Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False``
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cuda:0'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.Gammatonegram()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=44100, n_fft=2048, n_bins=64, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=20.0, fmax=None, norm=1, trainable_bins=False, trainable_STFT=False, verbose=True, device='cuda:0'):
    """Build the STFT kernels and the Gammatone filter bank as module parameters.

    See the class docstring for the meaning of each argument. The STFT kernels
    (``wsin``/``wcos``) and the Gammatone basis are registered as
    ``nn.Parameter`` so the layer works with ``nn.DataParallel``; their
    ``requires_grad`` flags follow ``trainable_STFT`` and ``trainable_bins``.
    """
    super(Gammatonegram, self).__init__()
    self.stride = hop_length
    self.center = center
    self.pad_mode = pad_mode
    self.n_fft = n_fft
    self.device = device
    self.power = power

    # Create filter windows for the STFT.
    stft_start = time()
    wsin, wcos, self.bins2freq, _ = create_fourier_kernels(n_fft, freq_bins=None, window=window, freq_scale='no', sr=sr)
    self.wsin = torch.tensor(wsin, dtype=torch.float, device=self.device)
    self.wcos = torch.tensor(wcos, dtype=torch.float, device=self.device)
    stft_time = time() - stft_start

    # Create the kernel for the Gammatone spectrogram.
    gamma_start = time()
    gammatone_basis = gammatone(sr, n_fft, n_bins, fmin, fmax)
    self.gammatone_basis = torch.tensor(gammatone_basis, device=self.device)
    gamma_time = time() - gamma_start

    if verbose == True:
        # Bug fix: both messages previously used a single ``start`` variable that
        # was re-assigned between the two kernel creations, so the reported STFT
        # time was actually the Gammatone-kernel time. Each timer is now separate.
        print("STFT filter created, time used = {:.4f} seconds".format(stft_time))
        print("Gammatone filter created, time used = {:.4f} seconds".format(gamma_time))

    # Making everything nn.Parameter, so that this model can support nn.DataParallel
    self.gammatone_basis = torch.nn.Parameter(self.gammatone_basis, requires_grad=trainable_bins)
    self.wsin = torch.nn.Parameter(self.wsin, requires_grad=trainable_STFT)
    self.wcos = torch.nn.Parameter(self.wcos, requires_grad=trainable_STFT)
def forward(self, x):
    """Compute the Gammatonegram of the waveform ``x`` (see class docstring for shapes)."""
    x = broadcast_dim(x)

    # When centred, pad half a window on each side so each frame's time index
    # points at the middle of its STFT kernel.
    if self.center:
        if self.pad_mode == 'constant':
            padder = nn.ConstantPad1d(self.n_fft // 2, 0)
        elif self.pad_mode == 'reflect':
            padder = nn.ReflectionPad1d(self.n_fft // 2)
        x = padder(x)

    # STFT via conv1d: magnitude from the sine/cosine projections, raised to ``power``.
    imag_part = conv1d(x, self.wsin, stride=self.stride)
    real_part = conv1d(x, self.wcos, stride=self.stride)
    spec = torch.sqrt(imag_part.pow(2) + real_part.pow(2)) ** self.power

    # Project the linear-frequency spectrogram onto the Gammatone filter banks.
    return torch.matmul(self.gammatone_basis, spec)
class MelSpectrogramv2(torch.nn.Module):
    """This is an experimental feature using torch.stft when trainable is not needed.
    Somehow it is slower?

    Computes a Mel spectrogram either with ``torch.stft`` (when
    ``trainable_STFT=False``) or with trainable conv1d Fourier kernels
    (when ``trainable_STFT=True``). Arguments follow the librosa convention.
    """

    def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, device='cuda:0'):
        super(MelSpectrogramv2, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.trainable_STFT = trainable_STFT
        self.device = device

        # Create filter windows for the STFT.
        if self.trainable_STFT == True:
            start = time()
            wsin, wcos, self.bins2freq, _ = create_fourier_kernels(n_fft, freq_bins=None, window=window, freq_scale='no', sr=sr)
            self.wsin = torch.tensor(wsin, dtype=torch.float, device=self.device)
            self.wcos = torch.tensor(wcos, dtype=torch.float, device=self.device)
            self.wsin = torch.nn.Parameter(self.wsin)
            self.wcos = torch.nn.Parameter(self.wcos)
            print("STFT filter created, time used = {:.4f} seconds".format(time()-start))
        else:
            # Non-trainable path: torch.stft with a plain window tensor.
            window = get_window(window, int(n_fft), fftbins=True).astype(np.float32)
            self.window = torch.tensor(window, device=self.device)

        # Create the kernel for the Mel spectrogram.
        start = time()
        mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
        self.mel_basis = torch.tensor(mel_basis, device=self.device)
        print("Mel filter created, time used = {:.4f} seconds".format(time()-start))
        if trainable_mel == True:
            self.mel_basis = torch.nn.Parameter(self.mel_basis)

    def forward(self, x):
        """Return the Mel spectrogram of ``x``, shape ``(num_samples, n_mels, time_steps)``."""
        # Bug fix: ``padding`` was only assigned when ``self.center`` was truthy,
        # but it is used unconditionally in both branches below, so center=False
        # raised NameError. Fall back to an identity module (i.e. no padding).
        padding = nn.Identity()
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft//2)

        if self.trainable_STFT == False:
            x = padding(x)
            spec_complex = torch.stft(x, self.n_fft, self.stride, window=self.window)
            # Power spectrogram from the (real, imag) pair in the last axis.
            spec = spec_complex[:, :, :, 0].pow(2) + spec_complex[:, :, :, 1].pow(2)
        else:
            x = broadcast_dim(x)
            x = padding(x)
            # Doing the STFT by using conv1d with the trainable kernels.
            spec = conv1d(x, self.wsin, stride=self.stride).pow(2) \
                 + conv1d(x, self.wcos, stride=self.stride).pow(2)

        melspec = torch.matmul(self.mel_basis, spec)
        return melspec
class CQT1992(torch.nn.Module):
    """Constant-Q transform using the 1992 algorithm: the STFT is computed with
    conv1d kernels and then multiplied with the CQT kernels in the frequency
    domain.

    [1] Brown, Judith C.C. and Miller Puckette. "An efficient algorithm for the
    calculation of a constant Q transform." (1992).
    """

    def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', device="cuda:0", verbose=True):
        # ``verbose`` is appended after ``device`` (backward compatible) so the
        # kernel-creation prints can be silenced, matching CQT1992v2/CQT2010v2.
        super(CQT1992, self).__init__()
        # norm arg is not functioning
        self.hop_length = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.norm = norm
        self.device = device

        # Creating kernels for the CQT.
        Q = 1/(2**(1/bins_per_octave)-1)
        if verbose:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        self.cqt_kernels, self.kernal_width, self.lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax)
        self.lenghts = self.lenghts.to(device)
        # Convert the CQT kernels to the frequency domain, keeping positive freqs only.
        self.cqt_kernels = fft(self.cqt_kernels)[:, :self.kernal_width//2+1]
        self.cqt_kernels_real = torch.tensor(self.cqt_kernels.real.astype(np.float32), device=device)
        self.cqt_kernels_imag = torch.tensor(self.cqt_kernels.imag.astype(np.float32), device=device)
        if verbose:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

        # Creating kernels for the STFT.
        if verbose:
            print("Creating STFT kernels ...", end='\r')
        start = time()
        wsin, wcos, self.bins2freq, _ = create_fourier_kernels(self.kernal_width, window='ones', freq_scale='no')
        self.wsin = torch.tensor(wsin, device=device)
        self.wcos = torch.tensor(wcos, device=device)
        if verbose:
            print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))

    def forward(self, x):
        """Return the CQT magnitude of ``x``, shape ``(num_samples, n_bins, time_steps)``."""
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.kernal_width//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.kernal_width//2)
            else:
                # Bug fix: an unsupported pad_mode previously raised an obscure NameError.
                raise ValueError("pad_mode must be 'constant' or 'reflect', got {!r}".format(self.pad_mode))
            x = padding(x)

        # STFT, converting the audio from the time domain to the frequency domain.
        fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
        fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)

        # CQT: complex multiplication of the STFT with the CQT kernels.
        CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
                                         (fourier_real, fourier_imag))

        # Getting the CQT amplitude.
        CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))

        if self.norm:
            return CQT/self.kernal_width*torch.sqrt(self.lenghts.view(-1,1))
        else:
            return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT2010(torch.nn.Module):
    """Constant-Q transform using the 2010 resampling algorithm.

    This algorithm uses the resampling method proposed in [1]. Instead of
    convolving the STFT results with a gigantic CQT kernel covering the full
    frequency spectrum, we make a small CQT kernel covering only the top
    octave. Then we keep downsampling the input audio by a factor of 2 and
    convolving it with the small CQT kernel; every time the input audio is
    downsampled, the CQT relative to the downsampled input is equivalent to
    the next lower octave. The kernel creation process is still the same as
    the 1992 algorithm, so we can reuse the code from the 1992 algorithm [2].

    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992).

    The early-downsampling factor downsamples the input audio to reduce the
    CQT kernel size. The result with and without early downsampling is more
    or less the same except in the very low frequency region where freq < 40Hz.
    """

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, device='cuda:0', verbose=True):
        # Bug fix: the original body referenced an undefined name ``verbose`` when
        # earlydownsample=True (the default), raising NameError. ``verbose`` is now
        # a real parameter, appended after ``device`` to stay backward compatible.
        super(CQT2010, self).__init__()
        self.norm = norm  # norm is used to normalize the final CQT result by dividing n_fft
        # basis_norm is for normalizing the basis
        self.hop_length = hop_length
        self.pad_mode = pad_mode
        self.n_bins = n_bins
        self.earlydownsample = earlydownsample  # early downsampling will be activated later if possible
        self.device = device
        # Q is used to calculate filter_cutoff and to create the CQT kernels.
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating the low-pass filter used for the per-octave downsampling.
        if verbose:
            print("Creating low pass filter ...", end='\r')
        start = time()
        self.lowpass_filter = torch.tensor(
            create_lowpass_filter(
                band_center=0.5,
                kernelLength=256,
                transitionBandwidth=0.001), device=self.device)
        self.lowpass_filter = self.lowpass_filter[None, None, :]  # broadcast to the shape that fits conv1d
        if verbose:
            print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))

        # n_octaves determines how many resampling steps the CQT requires.
        n_filters = min(bins_per_octave, n_bins)
        self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))

        # Calculate the lowest frequency bin for the top-octave kernel.
        self.fmin_t = fmin*2**(self.n_octaves-1)
        remainder = n_bins % bins_per_octave
        if remainder == 0:
            fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)  # top bin frequency
        else:
            fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)  # top bin frequency
        self.fmin_t = fmax_t/2**(1-1/bins_per_octave)  # adjusting the top minimum bin
        if fmax_t > sr/2:
            raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, please reduce the n_bins'.format(fmax_t))

        if self.earlydownsample == True:  # do early downsampling if this argument is True
            if verbose:
                print("Creating early downsampling filter ...", end='\r')
            start = time()
            sr, self.hop_length, self.downsample_factor, self.early_downsample_filter, self.earlydownsample = self.get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose)
            if verbose:
                print("Early downsampling filter created, time used = {:.4f} seconds".format(time()-start))
        else:
            self.downsample_factor = 1.

        # Preparing the CQT kernels.
        if verbose:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False)
        # Per-bin window lengths, used for the normalization at the end.
        # Bug fix: np.float was removed from NumPy; the builtin float is equivalent.
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
        lenghts = np.ceil(Q * sr / freqs)
        self.lenghts = torch.tensor(lenghts, device=self.device).float()
        self.basis = basis
        fft_basis = fft(basis)[:, :self.n_fft//2+1]  # convert the CQT kernel from time domain to freq domain
        # These cqt kernels are already in the frequency domain.
        self.cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32), device=self.device)
        self.cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32), device=self.device)
        if verbose:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

        # Preparing kernels for the Short-Time Fourier Transform (STFT).
        # We set the frequency range in the CQT filter instead of here.
        if verbose:
            print("Creating STFT kernels ...", end='\r')
        start = time()
        wsin, wcos, self.bins2freq, _ = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
        self.wsin = torch.tensor(wsin, device=self.device)
        self.wcos = torch.tensor(wcos, device=self.device)
        if verbose:
            print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))

        # With center==True the STFT window is centred, so padding is required
        # at the beginning and the end of the signal.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.n_fft//2)

    def get_cqt(self, x, hop_length, padding):
        """Multiply the STFT result with the CQT kernel; see [1] for the method.

        [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992)."""
        # STFT, converting the audio input from time domain to frequency domain.
        try:
            x = padding(x)  # when center == True, we need padding at the beginning and the end
        except Exception:
            # Bug fix: narrowed from a bare except. Reflection padding fails when the
            # signal is shorter than the pad width; processing continues un-padded.
            print("padding with reflection mode might not be the best choice, try using constant padding")
        fourier_real = conv1d(x, self.wcos, stride=hop_length)
        fourier_imag = conv1d(x, self.wsin, stride=hop_length)
        # Multiplying the input with the CQT kernel in the frequency domain.
        CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
                                         (fourier_real, fourier_imag))
        # Getting the CQT amplitude.
        CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
        return CQT

    def get_early_downsample_params(self, sr, hop_length, fmax_t, Q, n_octaves, verbose):
        """Return ``(sr, hop_length, downsample_factor, filter, enabled)`` for early downsampling."""
        window_bandwidth = 1.5  # for the hann window
        filter_cutoff = fmax_t * (1 + 0.5 * window_bandwidth / Q)
        sr, hop_length, downsample_factor = self.early_downsample(sr, hop_length, n_octaves, sr//2, filter_cutoff)
        if downsample_factor != 1:
            if verbose:
                print("Can do early downsample, factor = ", downsample_factor)
            earlydownsample = True
            early_downsample_filter = create_lowpass_filter(band_center=1/downsample_factor, kernelLength=256, transitionBandwidth=0.03)
            early_downsample_filter = torch.tensor(early_downsample_filter, device=self.device)[None, None, :]
        else:
            if verbose:
                print("No early downsampling is required, downsample_factor = ", downsample_factor)
            early_downsample_filter = None
            earlydownsample = False
        return sr, hop_length, downsample_factor, early_downsample_filter, earlydownsample

    # The following two downsampling count functions are obtained from librosa CQT.
    # They determine the number of pre-resamplings when both the starting and the
    # ending frequencies are in the low frequency region.
    def early_downsample_count(self, nyquist, filter_cutoff, hop_length, n_octaves):
        '''Compute the number of early downsampling operations'''
        downsample_count1 = max(0, int(np.ceil(np.log2(0.85 * nyquist /
                                                       filter_cutoff)) - 1) - 1)
        num_twos = nextpow2(hop_length)
        downsample_count2 = max(0, num_twos - n_octaves + 1)
        return min(downsample_count1, downsample_count2)

    def early_downsample(self, sr, hop_length, n_octaves,
                         nyquist, filter_cutoff):
        '''Return the new sampling rate and hop length after early downsampling'''
        downsample_count = self.early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves)
        downsample_factor = 2**(downsample_count)
        hop_length //= downsample_factor  # getting the new hop_length
        new_sr = sr / float(downsample_factor)  # getting the new sampling rate
        sr = new_sr
        return sr, hop_length, downsample_factor

    def forward(self, x):
        """Return the CQT magnitude of ``x``, shape ``(num_samples, n_bins, time_steps)``."""
        x = broadcast_dim(x)
        if self.earlydownsample == True:
            x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
        hop = self.hop_length
        CQT = self.get_cqt(x, hop, self.padding)  # getting the top-octave CQT

        x_down = x  # new variable for the progressively downsampled signal
        for i in range(self.n_octaves-1):
            hop = hop//2
            x_down = downsampling_by_2(x_down, self.lowpass_filter)
            CQT1 = self.get_cqt(x_down, hop, self.padding)
            CQT = torch.cat((CQT1, CQT), 1)
        CQT = CQT[:, -self.n_bins:, :]  # removing unwanted top bins
        CQT = CQT*2**(self.n_octaves-1)  # normalizing signals with respect to n_fft
        # Normalizing the output with the downsampling factor; 2**(n_octaves-1)
        # makes the magnitude the same as the 1992 version.
        CQT = CQT*self.downsample_factor/2**(self.n_octaves-1)

        if self.norm:
            return CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1))
        else:
            return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT1992v2(torch.nn.Module):
    """This function is to calculate the CQT of the input signal. The input signal should
    be in one of the following shapes: 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``,
    3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred automatically if
    the input follows these 3 shapes. Most of the arguments follow the convention from
    librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is the
    same as ``torch.nn.Module``.

    This algorithm uses the method proposed in [1]. I slightly modify it so that it runs
    faster than the original 1992 algorithm, that is why I call it version 2.
    [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992).

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically. Default is ``None``.
    n_bins : int
        The total number of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization. Default is ``1``, which is the same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to the scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel. Default value is ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for the CQT kernels will also be calculated and the CQT kernels will be updated during model training. Default value is ``False``.
    output_format : str
        Determine the return type. ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``; ``Complex`` will return the STFT result in complex numbers, shape = ``(num_samples, freq_bins, time_steps, 2)``; ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins, time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.
    device : str
        Choose which device to initialize this layer. Default value is 'cuda:0'.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. Shape = ``(num_samples, freq_bins, time_steps)`` if 'Magnitude' is used as the ``output_format``; shape = ``(num_samples, freq_bins, time_steps, 2)`` if 'Complex' or 'Phase' are used as the ``output_format``.

    Examples
    --------
    >>> spec_layer = Spectrogram.CQT1992v2()
    >>> specs = spec_layer(x)
    """

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True, device='cuda:0'):
        super(CQT1992v2, self).__init__()
        # norm arg is not functioning
        self.trainable = trainable
        self.hop_length = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.output_format = output_format
        self.device = device

        # Creating kernels for the CQT.
        Q = 1/(2**(1/bins_per_octave)-1)
        if verbose == True:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        self.cqt_kernels, self.kernal_width, self.lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax)
        self.lenghts = self.lenghts.to(device)
        self.cqt_kernels_real = torch.tensor(self.cqt_kernels.real, device=self.device).unsqueeze(1)
        self.cqt_kernels_imag = torch.tensor(self.cqt_kernels.imag, device=self.device).unsqueeze(1)
        # Making everything a Parameter to support nn.DataParallel.
        self.cqt_kernels_real = torch.nn.Parameter(self.cqt_kernels_real, requires_grad=trainable)
        self.cqt_kernels_imag = torch.nn.Parameter(self.cqt_kernels_imag, requires_grad=trainable)
        self.lenghts = torch.nn.Parameter(self.lenghts, requires_grad=False)
        if verbose == True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

    def _center_padding(self):
        """Return the padding module implied by ``pad_mode`` (used when ``center=True``)."""
        if self.pad_mode == 'constant':
            return nn.ConstantPad1d(self.kernal_width//2, 0)
        elif self.pad_mode == 'reflect':
            return nn.ReflectionPad1d(self.kernal_width//2)
        # Bug fix: an unsupported pad_mode previously raised an obscure NameError.
        raise ValueError("pad_mode must be 'constant' or 'reflect', got {!r}".format(self.pad_mode))

    def forward(self, x):
        """Return the CQT of ``x`` in the format selected by ``output_format``."""
        x = broadcast_dim(x)
        if self.center:
            x = self._center_padding()(x)

        # CQT: direct convolution with the (librosa-style length-normalized) kernels.
        scale = torch.sqrt(self.lenghts.view(-1, 1))
        CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)*scale
        CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)*scale

        if self.output_format == 'Magnitude':
            if self.trainable == False:
                # Getting the CQT amplitude.
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
            else:
                # The small epsilon keeps the gradient of sqrt finite at zero.
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
            return CQT
        elif self.output_format == 'Complex':
            return torch.stack((CQT_real, CQT_imag), -1)
        elif self.output_format == 'Phase':
            phase_real = torch.cos(torch.atan2(CQT_imag, CQT_real))
            phase_imag = torch.sin(torch.atan2(CQT_imag, CQT_real))
            return torch.stack((phase_real, phase_imag), -1)

    def forward_manual(self, x):
        """Return the CQT magnitude with the length normalization applied after the modulus."""
        x = broadcast_dim(x)
        if self.center:
            x = self._center_padding()(x)

        # CQT without the per-bin scaling applied first.
        CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
        CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)

        # Getting the CQT amplitude, then normalizing as librosa does.
        CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
        return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT2010v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
This algorithm uses the resampling method proposed in [1]. Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the input audio by a factor of 2 and convolving it with the small CQT kernel. Every time the input audio is downsampled, the CQT relative to the downsampled input is equivalent to the next lower octave.
The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the code from the 1992 alogrithm [2]
[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992).
early downsampling factor is to downsample the input audio to reduce the CQT kernel size. The result with and without early downsampling are more or less the same except in the very low frequency region where freq < 40Hz
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the higest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : bool
Normalization for the CQT result.
basis_norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization. Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels will also be caluclated and the CQT kernels will be updated during model training. Default value is ``False``
output_format : str
Determine the return type. 'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; 'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; 'Phase' will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cuda:0'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if 'Magnitude' is used as the ``output_format``; Shape = ``(num_samples, freq_bins,time_steps, 2)`` if 'Complex' or 'Phase' are used as the ``output_format``
Examples
--------
>>> spec_layer = Spectrogram.CQT2010v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True, device='cuda:0'):
super(CQT2010v2, self).__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
#basis_norm is for normlaizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.earlydownsample = earlydownsample # We will activate eraly downsampling later if possible
self.trainable = trainable
self.output_format = output_format
self.device = device
Q = 1/(2**(1/bins_per_octave)-1) # It will be used to calculate filter_cutoff and creating CQT kernels
# Creating lowpass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
# self.lowpass_filter = torch.tensor(
# create_lowpass_filter(
# band_center = 0.50,
# kernelLength=256,
# transitionBandwidth=0.001))
self.lowpass_filter = torch.tensor(
create_lowpass_filter(
band_center = 0.50,
kernelLength=256,
transitionBandwidth=0.001), device=self.device)
self.lowpass_filter = self.lowpass_filter[None,None,:] # Broadcast the tensor to the shape that fits conv1d
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
# Caluate num of filter requires for the kernel
# n_octaves determines how many resampling requires for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
if verbose==True:
print("num_octave = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) # Calculate the top bin frequency
else:
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) # Calculate the top bin frequency
self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, self.early_downsample_filter, self.earlydownsample = self.get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose)
if verbose==True:
print("Early downsampling filter created, time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
basis, self.n_fft, self.lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False)
# For normalization in the end
freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
self.lenghts = torch.tensor(lenghts,device=self.device).float()
self.basis = basis
self.cqt_kernels_real = torch.tensor(basis.real.astype(np.float32),device=self.device).unsqueeze(1) # These cqt_kernal is already in the frequency domain
self.cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32),device=self.device).unsqueeze(1)
# Making them nn.Parameter so that the model can support nn.DataParallel
self.cqt_kernels_real = torch.nn.Parameter(self.cqt_kernels_real, requires_grad=self.trainable)
self.cqt_kernels_imag = torch.nn.Parameter(self.cqt_kernels_imag, requires_grad=self.trainable)
self.lenghts = torch.nn.Parameter(self.lenghts, requires_grad=False)
self.lowpass_filter = torch.nn.Parameter(self.lowpass_filter, requires_grad=False)
# if trainable==True:
# self.cqt_kernels_real = torch.nn.Parameter(self.cqt_kernels_real)
# self.cqt_kernels_imag = torch.nn.Parameter(self.cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def get_cqt(self,x,hop_length, padding):
"""Multiplying the STFT result with the cqt_kernal, check out the 1992 CQT paper [1] for how to multiple the STFT result with the CQT kernel
[2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992)."""
# STFT, converting the audio input from time domain to frequency domain
try:
x = padding(x) # When center == True, we need padding at the beginning and ending
except:
print("padding with reflection mode might not be the best choice, try using constant padding")
CQT_real = conv1d(x, self.cqt_kernels_real, stride=hop_length)
CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=hop_length)
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
return CQT
def get_cqt_complex(self,x,hop_length, padding):
"""Multiplying the STFT result with the cqt_kernal, check out the 1992 CQT paper [1] for how to multiple the STFT result with the CQT kernel
[2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992)."""
# STFT, converting the audio input from time domain to frequency domain
try:
x = padding(x) # When center == True, we need padding at the beginning and ending
except:
print("padding with reflection mode might not be the best choice, try using constant padding")
CQT_real = conv1d(x, self.cqt_kernels_real, stride=hop_length)
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=hop_length)
return torch.stack((CQT_real, CQT_imag),-1)
def get_early_downsample_params(self, sr, hop_length, fmax_t, Q, n_octaves, verbose):
    """Decide whether the audio can be pre-downsampled before the CQT.

    Returns the (possibly reduced) sampling rate and hop length, the
    downsampling factor, the lowpass filter tensor to apply (or ``None``),
    and a flag saying whether early downsampling will happen.
    """
    window_bandwidth = 1.5  # bandwidth of the hann window
    filter_cutoff = fmax_t * (1 + 0.5 * window_bandwidth / Q)
    sr, hop_length, downsample_factor = self.early_downsample(
        sr, hop_length, n_octaves, sr // 2, filter_cutoff)
    if downsample_factor != 1:
        if verbose == True:
            print("Can do early downsample, factor = ", downsample_factor)
        earlydownsample = True
        kernel = create_lowpass_filter(band_center=1 / downsample_factor,
                                       kernelLength=256,
                                       transitionBandwidth=0.03)
        # Shape to (1, 1, kernel_len) so it can be used directly by conv1d.
        early_downsample_filter = torch.tensor(kernel, device=self.device)[None, None, :]
    else:
        if verbose == True:
            print("No early downsampling is required, downsample_factor = ", downsample_factor)
        early_downsample_filter = None
        earlydownsample = False
    return sr, hop_length, downsample_factor, early_downsample_filter, earlydownsample
# The following two downsampling count functions are obtained from librosa CQT
# They are used to determine the number of pre resamplings if the starting and ending frequency are both in low frequency regions.
def early_downsample_count(self, nyquist, filter_cutoff, hop_length, n_octaves):
    '''Compute the number of early downsampling operations (from librosa CQT).'''
    # Limit imposed by the anti-aliasing headroom above the filter cutoff.
    count_from_cutoff = max(
        0, int(np.ceil(np.log2(0.85 * nyquist / filter_cutoff)) - 1) - 1)
    # Limit imposed by how many factors of two the hop length can spare.
    count_from_hop = max(0, nextpow2(hop_length) - n_octaves + 1)
    return min(count_from_cutoff, count_from_hop)
def early_downsample(self, sr, hop_length, n_octaves,
                     nyquist, filter_cutoff):
    '''Return the new sampling rate and hop length after early downsampling.'''
    count = self.early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves)
    factor = 2 ** count
    # Sampling rate and hop length shrink by the same power-of-two factor.
    return sr / float(factor), hop_length // factor, factor
def forward(self, x):
    """Convert a batch of audio waveforms into their CQT representation.

    Pipeline: optional early downsampling, CQT of the top octave, then for
    each lower octave halve the hop length and downsample the signal by 2
    before reusing the same kernels.  Output depends on
    ``self.output_format``: 'Magnitude', 'Complex', or 'Phase'.
    """
    x = broadcast_dim(x)
    if self.earlydownsample == True:
        # Pre-downsample the input when get_early_downsample_params decided it is safe.
        x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
    hop = self.hop_length
    CQT = self.get_cqt_complex(x, hop, self.padding)  # Getting the top octave CQT
    x_down = x  # Preparing a new variable for downsampling
    for i in range(self.n_octaves - 1):
        # Each lower octave: half the hop, signal lowpassed and decimated by 2.
        hop = hop // 2
        x_down = downsampling_by_2(x_down, self.lowpass_filter)
        CQT1 = self.get_cqt_complex(x_down, hop, self.padding)
        # Lower octaves are prepended along the frequency-bin axis.
        CQT = torch.cat((CQT1, CQT), 1)
    CQT = CQT[:, -self.n_bins:, :]  # Removing unwanted bottom bins
    # Scale up, then divide back out below together with downsample_factor;
    # kept as two steps to match the 1992-style magnitude convention.
    CQT = CQT * 2 ** (self.n_octaves - 1)  # Normalizing signals with respect to n_fft
    CQT = CQT * self.downsample_factor / 2 ** (self.n_octaves - 1)  # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make it same mag as 1992
    # Note: "lenghts" (sic) matches the attribute name defined elsewhere in this class.
    CQT = CQT * torch.sqrt(self.lenghts.view(-1, 1, 1))  # Normalize again to get same result as librosa
    if self.output_format == 'Magnitude':
        if self.trainable == False:
            # Getting CQT Amplitude
            return torch.sqrt(CQT.pow(2).sum(-1))
        else:
            # Epsilon presumably keeps sqrt differentiable at zero when kernels are trainable — TODO confirm.
            return torch.sqrt(CQT.pow(2).sum(-1) + 1e-8)
    elif self.output_format == 'Complex':
        return CQT
    elif self.output_format == 'Phase':
        phase_real = torch.cos(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
        phase_imag = torch.sin(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
        return torch.stack((phase_real, phase_imag), -1)
    # NOTE(review): any other output_format silently returns None — consider raising ValueError.
def forward_manual(self, x):
    """Compute the magnitude CQT directly via :meth:`get_cqt`, ignoring
    ``self.output_format`` (always returns magnitudes).

    Same octave-by-octave scheme as :meth:`forward`: top octave first, then
    halve the hop and downsample the signal by 2 for each lower octave.
    """
    x = broadcast_dim(x)
    if self.earlydownsample == True:
        # Pre-downsample the input when get_early_downsample_params decided it is safe.
        x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
    hop = self.hop_length
    CQT = self.get_cqt(x, hop, self.padding)  # Getting the top octave CQT
    x_down = x  # Preparing a new variable for downsampling
    for i in range(self.n_octaves - 1):
        hop = hop // 2
        x_down = downsampling_by_2(x_down, self.lowpass_filter)
        CQT1 = self.get_cqt(x_down, hop, self.padding)
        # Lower octaves are prepended along the frequency-bin axis.
        CQT = torch.cat((CQT1, CQT), 1)
    CQT = CQT[:, -self.n_bins:, :]  # Removing unwanted bottom bins
    CQT = CQT * 2 ** (self.n_octaves - 1)  # Normalizing signals with respect to n_fft
    CQT = CQT * self.downsample_factor / 2 ** (self.n_octaves - 1)  # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make it same mag as 1992
    # Note: "lenghts" (sic) matches the attribute name defined elsewhere in this class.
    return CQT * torch.sqrt(self.lenghts.view(-1, 1))
class CQT(CQT1992v2):
    """Alias of :class:`CQT1992v2`; refer to that class for documentation."""
| 49.418798
| 484
| 0.638393
| 10,762
| 77,291
| 4.456328
| 0.061884
| 0.00834
| 0.014012
| 0.012511
| 0.881211
| 0.86109
| 0.848642
| 0.836444
| 0.814133
| 0.803895
| 0
| 0.020076
| 0.255657
| 77,291
| 1,563
| 485
| 49.450416
| 0.813544
| 0.426932
| 0
| 0.716216
| 0
| 0
| 0.06623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058108
| false
| 0.032432
| 0.016216
| 0
| 0.147297
| 0.051351
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e068c64670abb9085c5ad0462009e33149206f91
| 10,451
|
py
|
Python
|
google/cloud/networkconnectivity/v1/networkconnectivity-v1-py/google/cloud/networkconnectivity_v1/services/hub_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/networkconnectivity/v1/networkconnectivity-v1-py/google/cloud/networkconnectivity_v1/services/hub_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/networkconnectivity/v1/networkconnectivity-v1-py/google/cloud/networkconnectivity_v1/services/hub_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.cloud.networkconnectivity_v1.types import hub
class ListHubsPager:
    """Pager for ``list_hubs`` results.

    Wraps an initial
    :class:`google.cloud.networkconnectivity_v1.types.ListHubsResponse` and
    exposes an ``__iter__`` method over the ``hubs`` field, transparently
    issuing further ``ListHubs`` requests whenever a ``next_page_token``
    indicates that more pages exist.

    Attribute access is delegated to the most recently fetched response, so
    all the usual response attributes remain available on the pager.
    """
    def __init__(self,
            method: Callable[..., hub.ListHubsResponse],
            request: hub.ListHubsRequest,
            response: hub.ListHubsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which will be invoked again for subsequent pages.
            request (google.cloud.networkconnectivity_v1.types.ListHubsRequest):
                The initial request object.
            response (google.cloud.networkconnectivity_v1.types.ListHubsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns the request whose page_token it advances.
        self._request = hub.ListHubsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes resolve against the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[hub.ListHubsResponse]:
        # Lazily fetch and yield each page, remembering the latest one.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[hub.Hub]:
        return (item for page in self.pages for item in page.hubs)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListHubsAsyncPager:
    """Async pager for ``list_hubs`` results.

    Wraps an initial
    :class:`google.cloud.networkconnectivity_v1.types.ListHubsResponse` and
    exposes an ``__aiter__`` method over the ``hubs`` field, transparently
    awaiting further ``ListHubs`` requests whenever a ``next_page_token``
    indicates that more pages exist.

    Attribute access is delegated to the most recently fetched response, so
    all the usual response attributes remain available on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[hub.ListHubsResponse]],
            request: hub.ListHubsRequest,
            response: hub.ListHubsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.
        Args:
            method (Callable): The method that was originally called, and
                which will be awaited again for subsequent pages.
            request (google.cloud.networkconnectivity_v1.types.ListHubsRequest):
                The initial request object.
            response (google.cloud.networkconnectivity_v1.types.ListHubsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns the request whose page_token it advances.
        self._request = hub.ListHubsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes resolve against the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[hub.ListHubsResponse]:
        # Lazily await and yield each page, remembering the latest one.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[hub.Hub]:
        async def iterate():
            async for page in self.pages:
                for item in page.hubs:
                    yield item
        return iterate()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListSpokesPager:
    """Pager for ``list_spokes`` results.

    Wraps an initial
    :class:`google.cloud.networkconnectivity_v1.types.ListSpokesResponse` and
    exposes an ``__iter__`` method over the ``spokes`` field, transparently
    issuing further ``ListSpokes`` requests whenever a ``next_page_token``
    indicates that more pages exist.

    Attribute access is delegated to the most recently fetched response, so
    all the usual response attributes remain available on the pager.
    """
    def __init__(self,
            method: Callable[..., hub.ListSpokesResponse],
            request: hub.ListSpokesRequest,
            response: hub.ListSpokesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which will be invoked again for subsequent pages.
            request (google.cloud.networkconnectivity_v1.types.ListSpokesRequest):
                The initial request object.
            response (google.cloud.networkconnectivity_v1.types.ListSpokesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns the request whose page_token it advances.
        self._request = hub.ListSpokesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes resolve against the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[hub.ListSpokesResponse]:
        # Lazily fetch and yield each page, remembering the latest one.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[hub.Spoke]:
        return (item for page in self.pages for item in page.spokes)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListSpokesAsyncPager:
    """Async pager for ``list_spokes`` results.

    Wraps an initial
    :class:`google.cloud.networkconnectivity_v1.types.ListSpokesResponse` and
    exposes an ``__aiter__`` method over the ``spokes`` field, transparently
    awaiting further ``ListSpokes`` requests whenever a ``next_page_token``
    indicates that more pages exist.

    Attribute access is delegated to the most recently fetched response, so
    all the usual response attributes remain available on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[hub.ListSpokesResponse]],
            request: hub.ListSpokesRequest,
            response: hub.ListSpokesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.
        Args:
            method (Callable): The method that was originally called, and
                which will be awaited again for subsequent pages.
            request (google.cloud.networkconnectivity_v1.types.ListSpokesRequest):
                The initial request object.
            response (google.cloud.networkconnectivity_v1.types.ListSpokesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns the request whose page_token it advances.
        self._request = hub.ListSpokesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes resolve against the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[hub.ListSpokesResponse]:
        # Lazily await and yield each page, remembering the latest one.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[hub.Spoke]:
        async def iterate():
            async for page in self.pages:
                for item in page.spokes:
                    yield item
        return iterate()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| 39.737643
| 95
| 0.662233
| 1,172
| 10,451
| 5.706485
| 0.147611
| 0.057416
| 0.076256
| 0.08134
| 0.903409
| 0.897877
| 0.897877
| 0.897877
| 0.888606
| 0.888606
| 0
| 0.004349
| 0.251938
| 10,451
| 262
| 96
| 39.889313
| 0.851113
| 0.486843
| 0
| 0.796296
| 0
| 0
| 0.008354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.018519
| 0.074074
| 0.314815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16285f15f15feca921cd6cae2e359d745ea3aad4
| 5,354
|
py
|
Python
|
test/test_target.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | 1
|
2021-07-06T23:37:49.000Z
|
2021-07-06T23:37:49.000Z
|
test/test_target.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | null | null | null |
test/test_target.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | null | null | null |
import os
import shutil
import unittest
from datetime import datetime
import boto3
import pandas as pd
from moto import mock_s3
from gokart.target import make_target, make_model_target
def _get_temporary_directory():
return os.path.abspath(os.path.join(os.path.dirname(__name__), 'temporary'))
class LocalTargetTest(unittest.TestCase):
    """Round-trip tests for file-system targets created by ``make_target``."""

    def tearDown(self):
        # Remove the scratch directory so each test starts clean.
        shutil.rmtree(_get_temporary_directory(), ignore_errors=True)

    def _dump_and_load(self, obj, filename):
        """Dump ``obj`` to a target named ``filename`` and return the reloaded value.

        Factors out the dump/load boilerplate duplicated across the tests;
        the serialization format is selected by the file extension.
        """
        file_path = os.path.join(_get_temporary_directory(), filename)
        target = make_target(file_path=file_path, unique_id=None)
        target.dump(obj)
        return target.load()

    def test_save_and_load_pickle_file(self):
        obj = 1
        self.assertEqual(self._dump_and_load(obj, 'test.pkl'), obj)

    def test_save_and_load_text_file(self):
        obj = 1
        self.assertEqual(self._dump_and_load(obj, 'test.txt'), [str(obj)],
                         msg='should save an object as List[str].')

    def test_save_and_load_gzip(self):
        obj = 1
        self.assertEqual(self._dump_and_load(obj, 'test.gz'), [str(obj)],
                         msg='should save an object as List[str].')

    def test_save_and_load_csv(self):
        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
        pd.testing.assert_frame_equal(self._dump_and_load(obj, 'test.csv'), obj)

    def test_save_and_load_tsv(self):
        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
        pd.testing.assert_frame_equal(self._dump_and_load(obj, 'test.tsv'), obj)

    def test_last_modified_time(self):
        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
        file_path = os.path.join(_get_temporary_directory(), 'test.csv')
        target = make_target(file_path=file_path, unique_id=None)
        target.dump(obj)
        t = target.last_modification_time()
        self.assertIsInstance(t, datetime)

    def test_last_modified_time_without_file(self):
        # No dump: the target's file does not exist yet.
        file_path = os.path.join(_get_temporary_directory(), 'test.csv')
        target = make_target(file_path=file_path, unique_id=None)
        with self.assertRaises(FileNotFoundError):
            target.last_modification_time()
class S3TargetTest(unittest.TestCase):
    """Tests for S3-backed targets; S3 is mocked per-test with ``@mock_s3``."""

    @staticmethod
    def _make_s3_target(filename='test.pkl'):
        """Create the mocked bucket and return a target pointing inside it.

        Factors out the bucket/target setup duplicated across the tests.
        Must be called inside a ``@mock_s3``-decorated test.
        """
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='test')
        file_path = os.path.join('s3://test/', filename)
        return make_target(file_path=file_path, unique_id=None)

    @mock_s3
    def test_save_on_s3(self):
        target = self._make_s3_target()
        obj = 1
        target.dump(obj)
        loaded = target.load()
        self.assertEqual(loaded, obj)

    @mock_s3
    def test_last_modified_time(self):
        target = self._make_s3_target()
        target.dump(1)
        t = target.last_modification_time()
        self.assertIsInstance(t, datetime)

    @mock_s3
    def test_last_modified_time_without_file(self):
        # No dump: the S3 object does not exist yet.
        target = self._make_s3_target()
        with self.assertRaises(FileNotFoundError):
            target.last_modification_time()
class ModelTargetTest(unittest.TestCase):
    """Round-trip tests for model targets created by ``make_model_target``."""

    def tearDown(self):
        # Remove the scratch directory so each test starts clean.
        shutil.rmtree(_get_temporary_directory(), ignore_errors=True)

    @staticmethod
    def _save_function(obj, path):
        # Save hook handed to make_model_target: pickle via a plain target.
        make_target(file_path=path).dump(obj)

    @staticmethod
    def _load_function(path):
        # Load hook handed to make_model_target: inverse of _save_function.
        return make_target(file_path=path).load()

    @classmethod
    def _build_target(cls, file_path):
        """Build a model target for ``file_path`` with the class's save/load hooks.

        Factors out the ``make_model_target`` call duplicated in both tests.
        """
        return make_model_target(
            file_path=file_path,
            temporary_directory=_get_temporary_directory(),
            save_function=cls._save_function,
            load_function=cls._load_function)

    def test_model_target_on_loacal(self):
        obj = 1
        file_path = os.path.join(_get_temporary_directory(), 'test.zip')
        target = self._build_target(file_path)
        target.dump(obj)
        loaded = target.load()
        self.assertEqual(loaded, obj)

    @mock_s3
    def test_model_target_on_s3(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='test')
        obj = 1
        file_path = os.path.join('s3://test/', 'test.zip')
        target = self._build_target(file_path)
        target.dump(obj)
        loaded = target.load()
        self.assertEqual(loaded, obj)
# Allow running this test module directly with ``python test_target.py``.
if __name__ == '__main__':
    unittest.main()
| 30.594286
| 87
| 0.655585
| 709
| 5,354
| 4.651622
| 0.141044
| 0.092177
| 0.05943
| 0.05094
| 0.86325
| 0.835052
| 0.8302
| 0.814736
| 0.792905
| 0.792905
| 0
| 0.010631
| 0.226933
| 5,354
| 174
| 88
| 30.770115
| 0.78618
| 0
| 0
| 0.736
| 0
| 0
| 0.052671
| 0
| 0
| 0
| 0
| 0
| 0.096
| 1
| 0.136
| false
| 0
| 0.064
| 0.016
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1653fa786d83a97f4d8a49f3edc2f09dbd67a976
| 49
|
py
|
Python
|
sklearn_quantile/utils/__init__.py
|
jasperroebroek/sklearn-quantile
|
d357240527f32b04b0fec3dcd308bb23de517209
|
[
"BSD-3-Clause"
] | 2
|
2022-02-04T19:31:42.000Z
|
2022-02-08T15:11:41.000Z
|
sklearn_quantile/utils/__init__.py
|
jasperroebroek/sklearn-quantile
|
d357240527f32b04b0fec3dcd308bb23de517209
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn_quantile/utils/__init__.py
|
jasperroebroek/sklearn-quantile
|
d357240527f32b04b0fec3dcd308bb23de517209
|
[
"BSD-3-Clause"
] | null | null | null |
from .weighted_quantile import weighted_quantile
| 24.5
| 48
| 0.897959
| 6
| 49
| 7
| 0.666667
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
16574493ed2c288bb57fe32fb1367dfbba5d7ca9
| 115
|
py
|
Python
|
edgesets/__init__.py
|
GiliardGodoi/edgesets
|
b59a600400972ccc82e5e17f2acbb2b45045b40b
|
[
"MIT"
] | null | null | null |
edgesets/__init__.py
|
GiliardGodoi/edgesets
|
b59a600400972ccc82e5e17f2acbb2b45045b40b
|
[
"MIT"
] | 20
|
2021-11-08T13:02:33.000Z
|
2021-11-29T01:03:40.000Z
|
edgesets/__init__.py
|
GiliardGodoi/edgesets
|
b59a600400972ccc82e5e17f2acbb2b45045b40b
|
[
"MIT"
] | null | null | null |
from .main import EdgeSet
from .main import DEdge
from .main import UEdge
from .evaluation import EvaluateEdgeSet
| 19.166667
| 39
| 0.817391
| 16
| 115
| 5.875
| 0.5
| 0.255319
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 6
| 39
| 19.166667
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.